2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
|
2009-01-06 11:05:12 +08:00
|
|
|
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* This file is released under the GPL.
|
|
|
|
*/
|
|
|
|
|
2016-05-13 04:28:10 +08:00
|
|
|
#include "dm-core.h"
|
|
|
|
#include "dm-rq.h"
|
2007-10-20 05:48:00 +08:00
|
|
|
#include "dm-uevent.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/module.h>
|
2006-03-27 17:18:20 +08:00
|
|
|
#include <linux/mutex.h>
|
2024-06-11 20:26:44 +08:00
|
|
|
#include <linux/sched/mm.h>
|
2017-02-03 02:15:33 +08:00
|
|
|
#include <linux/sched/signal.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/blkpg.h>
|
|
|
|
#include <linux/bio.h>
|
|
|
|
#include <linux/mempool.h>
|
2017-04-13 03:35:44 +08:00
|
|
|
#include <linux/dax.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/idr.h>
|
2017-05-30 03:57:56 +08:00
|
|
|
#include <linux/uio.h>
|
2006-03-27 17:17:54 +08:00
|
|
|
#include <linux/hdreg.h>
|
dm: separate device deletion from dm_put
This patch separates the device deletion code from dm_put()
to make sure the deletion happens in the process context.
By this patch, device deletion always occurs in an ioctl (process)
context and dm_put() can be called in interrupt context.
As a result, the request-based dm's bad dm_put() usage pointed out
by Mikulas below disappears.
http://marc.info/?l=dm-devel&m=126699981019735&w=2
Without this patch, I confirmed there is a case to crash the system:
dm_put() => dm_table_destroy() => vfree() => BUG_ON(in_interrupt())
Some more backgrounds and details:
In request-based dm, a device opener can remove a mapped_device
while the last request is still completing, because bios in the last
request complete first and then the device opener can close and remove
the mapped_device before the last request completes:
CPU0 CPU1
=================================================================
<<INTERRUPT>>
blk_end_request_all(clone_rq)
blk_update_request(clone_rq)
bio_endio(clone_bio) == end_clone_bio
blk_update_request(orig_rq)
bio_endio(orig_bio)
<<I/O completed>>
dm_blk_close()
dev_remove()
dm_put(md)
<<Free md>>
blk_finish_request(clone_rq)
....
dm_end_request(clone_rq)
free_rq_clone(clone_rq)
blk_end_request_all(orig_rq)
rq_completed(md)
So request-based dm used dm_get()/dm_put() to hold md for each I/O
until its request completion handling is fully done.
However, the final dm_put() can call the device deletion code which
must not be run in interrupt context and may cause kernel panic.
To solve the problem, this patch moves the device deletion code,
dm_destroy(), to predetermined places that is actually deleting
the mapped_device in ioctl (process) context, and changes dm_put()
just to decrement the reference count of the mapped_device.
By this change, dm_put() can be used in any context and the symmetric
model below is introduced:
dm_create(): create a mapped_device
dm_destroy(): destroy a mapped_device
dm_get(): increment the reference count of a mapped_device
dm_put(): decrement the reference count of a mapped_device
dm_destroy() waits for all references of the mapped_device to disappear,
then deletes the mapped_device.
dm_destroy() uses active waiting with msleep(1), since deleting
the mapped_device isn't performance-critical task.
And since at this point, nobody opens the mapped_device and no new
reference will be taken, the pending counts are just for racing
completing activity and will eventually decrease to zero.
For the unlikely case of the forced module unload, dm_destroy_immediate(),
which doesn't wait and forcibly deletes the mapped_device, is also
introduced and used in dm_hash_remove_all(). Otherwise, "rmmod -f"
may be stuck and never return.
And now, because the mapped_device is deleted at this point, subsequent
accesses to the mapped_device may cause NULL pointer references.
Cc: stable@kernel.org
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2010-08-12 11:13:56 +08:00
|
|
|
#include <linux/delay.h>
|
2014-10-29 06:34:52 +08:00
|
|
|
#include <linux/wait.h>
|
2015-10-15 20:10:51 +08:00
|
|
|
#include <linux/pr.h>
|
2017-10-20 15:37:39 +08:00
|
|
|
#include <linux/refcount.h>
|
2024-06-11 20:08:33 +08:00
|
|
|
#include <linux/blk-cgroup.h>
|
tracing/events: convert block trace points to TRACE_EVENT()
TRACE_EVENT is a more generic way to define tracepoints. Doing so adds
these new capabilities to this tracepoint:
- zero-copy and per-cpu splice() tracing
- binary tracing without printf overhead
- structured logging records exposed under /debug/tracing/events
- trace events embedded in function tracer output and other plugins
- user-defined, per tracepoint filter expressions
...
Cons:
- no dev_t info for the output of plug, unplug_timer and unplug_io events.
no dev_t info for getrq and sleeprq events if bio == NULL.
no dev_t info for rq_abort,...,rq_requeue events if rq->rq_disk == NULL.
This is mainly because we can't get the deivce from a request queue.
But this may change in the future.
- A packet command is converted to a string in TP_assign, not TP_print.
While blktrace do the convertion just before output.
Since pc requests should be rather rare, this is not a big issue.
- In blktrace, an event can have 2 different print formats, but a TRACE_EVENT
has a unique format, which means we have some unused data in a trace entry.
The overhead is minimized by using __dynamic_array() instead of __array().
I've benchmarked the ioctl blktrace vs the splice based TRACE_EVENT tracing:
dd dd + ioctl blktrace dd + TRACE_EVENT (splice)
1 7.36s, 42.7 MB/s 7.50s, 42.0 MB/s 7.41s, 42.5 MB/s
2 7.43s, 42.3 MB/s 7.48s, 42.1 MB/s 7.43s, 42.4 MB/s
3 7.38s, 42.6 MB/s 7.45s, 42.2 MB/s 7.41s, 42.5 MB/s
So the overhead of tracing is very small, and no regression when using
those trace events vs blktrace.
And the binary output of TRACE_EVENT is much smaller than blktrace:
# ls -l -h
-rw-r--r-- 1 root root 8.8M 06-09 13:24 sda.blktrace.0
-rw-r--r-- 1 root root 195K 06-09 13:24 sda.blktrace.1
-rw-r--r-- 1 root root 2.7M 06-09 13:25 trace_splice.out
Following are some comparisons between TRACE_EVENT and blktrace:
plug:
kjournald-480 [000] 303.084981: block_plug: [kjournald]
kjournald-480 [000] 303.084981: 8,0 P N [kjournald]
unplug_io:
kblockd/0-118 [000] 300.052973: block_unplug_io: [kblockd/0] 1
kblockd/0-118 [000] 300.052974: 8,0 U N [kblockd/0] 1
remap:
kjournald-480 [000] 303.085042: block_remap: 8,0 W 102736992 + 8 <- (8,8) 33384
kjournald-480 [000] 303.085043: 8,0 A W 102736992 + 8 <- (8,8) 33384
bio_backmerge:
kjournald-480 [000] 303.085086: block_bio_backmerge: 8,0 W 102737032 + 8 [kjournald]
kjournald-480 [000] 303.085086: 8,0 M W 102737032 + 8 [kjournald]
getrq:
kjournald-480 [000] 303.084974: block_getrq: 8,0 W 102736984 + 8 [kjournald]
kjournald-480 [000] 303.084975: 8,0 G W 102736984 + 8 [kjournald]
bash-2066 [001] 1072.953770: 8,0 G N [bash]
bash-2066 [001] 1072.953773: block_getrq: 0,0 N 0 + 0 [bash]
rq_complete:
konsole-2065 [001] 300.053184: block_rq_complete: 8,0 W () 103669040 + 16 [0]
konsole-2065 [001] 300.053191: 8,0 C W 103669040 + 16 [0]
ksoftirqd/1-7 [001] 1072.953811: 8,0 C N (5a 00 08 00 00 00 00 00 24 00) [0]
ksoftirqd/1-7 [001] 1072.953813: block_rq_complete: 0,0 N (5a 00 08 00 00 00 00 00 24 00) 0 + 0 [0]
rq_insert:
kjournald-480 [000] 303.084985: block_rq_insert: 8,0 W 0 () 102736984 + 8 [kjournald]
kjournald-480 [000] 303.084986: 8,0 I W 102736984 + 8 [kjournald]
Changelog from v2 -> v3:
- use the newly introduced __dynamic_array().
Changelog from v1 -> v2:
- use __string() instead of __array() to minimize the memory required
to store hex dump of rq->cmd().
- support large pc requests.
- add missing blk_fill_rwbs_rq() in block_rq_requeue TRACE_EVENT.
- some cleanups.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
LKML-Reference: <4A2DF669.5070905@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2009-06-09 13:43:05 +08:00
|
|
|
|
2006-06-26 15:27:35 +08:00
|
|
|
#define DM_MSG_PREFIX "core"
|
|
|
|
|
2009-06-22 17:12:30 +08:00
|
|
|
/*
|
|
|
|
* Cookies are numeric values sent with CHANGE and REMOVE
|
|
|
|
* uevents while resuming, removing or renaming the device.
|
|
|
|
*/
|
|
|
|
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
|
|
|
|
#define DM_COOKIE_LENGTH 24
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static const char *_name = DM_NAME;
|
|
|
|
|
|
|
|
static unsigned int major = 0;
|
|
|
|
static unsigned int _major = 0;
|
|
|
|
|
2011-08-02 19:32:01 +08:00
|
|
|
static DEFINE_IDR(_minor_idr);
|
|
|
|
|
2006-06-26 15:27:22 +08:00
|
|
|
static DEFINE_SPINLOCK(_minor_lock);
|
2013-11-02 06:27:41 +08:00
|
|
|
|
|
|
|
static void do_deferred_remove(struct work_struct *w);
|
|
|
|
|
|
|
|
static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
|
|
|
|
|
2014-06-15 01:44:31 +08:00
|
|
|
static struct workqueue_struct *deferred_remove_workqueue;
|
|
|
|
|
2017-01-17 05:05:59 +08:00
|
|
|
atomic_t dm_global_event_nr = ATOMIC_INIT(0);
|
|
|
|
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
|
|
|
|
|
2017-09-20 19:29:49 +08:00
|
|
|
/*
 * Bump the global event counter and wake any listeners polling on it.
 * The increment must happen before the wake-up so a woken waiter
 * observes the new event number.
 */
void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * One of these is allocated (on-stack) per original bio.
 * Tracks the progress of splitting/cloning one original bio
 * across the targets of the current table.
 */
struct clone_info {
	struct dm_table *map;		/* table the bio is being mapped against */
	struct bio *bio;		/* the original bio being split */
	struct dm_io *io;		/* per-original-bio bookkeeping */
	sector_t sector;		/* next sector to map */
	unsigned sector_count;		/* sectors remaining to be mapped */
};
|
|
|
|
|
|
|
|
/*
 * One of these is allocated per clone bio.
 * 'clone' must remain the last member: per-bio data is laid out
 * immediately before it (see dm_per_bio_data()).
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;			/* DM_TIO_MAGIC; distinguishes tio from dm_io */
	struct dm_io *io;		/* owning per-original-bio structure */
	struct dm_target *ti;		/* target this clone is destined for */
	unsigned target_bio_nr;		/* ordinal when one target gets several bios */
	unsigned *len_ptr;		/* points at the (adjustable) mapped length */
	bool inside_dm_io;		/* true if embedded in struct dm_io (first clone) */
	struct bio clone;		/* must be last member */
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;			/* DM_IO_MAGIC; distinguishes dm_io from tio */
	struct mapped_device *md;	/* device the original bio was submitted to */
	blk_status_t status;		/* combined completion status of all clones */
	atomic_t io_count;		/* outstanding clone count; completes at zero */
	struct bio *orig_bio;		/* the bio as originally submitted */
	unsigned long start_time;	/* jiffies at submission, for accounting */
	spinlock_t endio_lock;		/* serialises status updates from clone endios */
	struct dm_stats_aux stats_aux;	/* dm-statistics bookkeeping */
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;	/* embedded first clone; must be last member */
};
|
|
|
|
|
2017-12-12 12:17:47 +08:00
|
|
|
/*
 * Return the start of the per-bio data that targets asked to be
 * allocated in front of each clone bio.  The layout is:
 * [per-bio data][(struct dm_io header, if first clone)][struct dm_target_io]
 * so we walk backwards from the embedded clone bio.
 */
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	char *p = (char *)bio - offsetof(struct dm_target_io, clone);

	/* The first clone is embedded in struct dm_io; skip its header too. */
	if (tio->inside_dm_io)
		p -= offsetof(struct dm_io, tio);
	return p - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);
|
|
|
|
|
|
|
|
/*
 * Inverse of dm_per_bio_data(): given a target's per-bio data pointer,
 * recover the clone bio it belongs to.  The magic number directly after
 * the data tells us whether a struct dm_io header sits between the data
 * and the struct dm_target_io.
 */
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	char *p = (char *)data + data_size;
	struct dm_io *io = (struct dm_io *)p;

	if (io->magic == DM_IO_MAGIC)
		p += offsetof(struct dm_io, tio);	/* first clone: skip dm_io header */
	else
		BUG_ON(io->magic != DM_TIO_MAGIC);	/* must be a bare tio */
	return (struct bio *)(p + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
|
|
|
|
|
|
|
|
unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
|
|
|
|
{
|
|
|
|
return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
|
|
|
|
|
2006-06-26 15:27:21 +08:00
|
|
|
#define MINOR_ALLOCED ((void *)-1)
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Bits for the md->flags field.
|
|
|
|
*/
|
2009-04-09 07:27:14 +08:00
|
|
|
#define DMF_BLOCK_IO_FOR_SUSPEND 0
|
2005-04-17 06:20:36 +08:00
|
|
|
#define DMF_SUSPENDED 1
|
2006-01-06 16:20:06 +08:00
|
|
|
#define DMF_FROZEN 2
|
2006-06-26 15:27:23 +08:00
|
|
|
#define DMF_FREEING 3
|
2006-06-26 15:27:34 +08:00
|
|
|
#define DMF_DELETING 4
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supercedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushdback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
#define DMF_NOFLUSH_SUSPENDING 5
|
2015-04-28 14:48:34 +08:00
|
|
|
#define DMF_DEFERRED_REMOVE 6
|
|
|
|
#define DMF_SUSPENDED_INTERNALLY 7
|
2024-06-11 20:26:44 +08:00
|
|
|
#define DMF_POST_SUSPENDING 8
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-02-23 01:16:21 +08:00
|
|
|
#define DM_NUMA_NODE NUMA_NO_NODE
|
|
|
|
static int dm_numa_node = DM_NUMA_NODE;
|
2016-01-29 05:52:56 +08:00
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
|
|
|
|
static int swap_bios = DEFAULT_SWAP_BIOS;
|
|
|
|
static int get_swap_bios(void)
|
|
|
|
{
|
|
|
|
int latch = READ_ONCE(swap_bios);
|
|
|
|
if (unlikely(latch <= 0))
|
|
|
|
latch = DEFAULT_SWAP_BIOS;
|
|
|
|
return latch;
|
|
|
|
}
|
|
|
|
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;	/* bio_set for clone bios */
	struct bio_set io_bs;	/* bio_set for per-original-bio dm_io allocations */
};
|
|
|
|
|
2014-08-14 02:53:43 +08:00
|
|
|
/*
 * One per underlying device opened by a table; refcounted so several
 * tables/targets can share a single open of the same device.
 */
struct table_device {
	struct list_head list;	/* linkage on md->table_devices */
	refcount_t count;	/* opens referencing this device */
	struct dm_dev dm_dev;	/* the wrapped device itself */
};
|
|
|
|
|
2013-09-13 06:06:12 +08:00
|
|
|
/*
|
|
|
|
* Bio-based DM's mempools' reserved IOs set by the user.
|
|
|
|
*/
|
2016-05-13 04:28:10 +08:00
|
|
|
#define RESERVED_BIO_BASED_IOS 16
|
2013-09-13 06:06:12 +08:00
|
|
|
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
|
|
|
|
|
2016-02-23 01:16:21 +08:00
|
|
|
/*
 * Read a signed module parameter and clamp it to [min, max].
 * If clamping was needed, try to write the clamped value back so
 * subsequent readers see a sane setting; cmpxchg means a concurrent
 * writer's newer value is never overwritten.
 */
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int clamped = param;

	if (param < min)
		clamped = min;
	else if (param > max)
		clamped = max;

	if (clamped != param) {
		/* Best effort: lose the race rather than clobber a newer value. */
		(void)cmpxchg(module_param, param, clamped);
		param = clamped;
	}

	return param;
}
|
|
|
|
|
2016-05-13 04:28:10 +08:00
|
|
|
/*
 * Read an unsigned module parameter: 0 selects the default 'def',
 * anything above 'max' is clamped to 'max'.  As with the signed
 * variant, a corrected value is written back best-effort via cmpxchg.
 */
unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned cur = READ_ONCE(*module_param);
	unsigned repl = 0;

	if (cur == 0)
		repl = def;		/* unset: use the default */
	else if (cur > max)
		repl = max;		/* too big: clamp */

	if (repl != 0) {
		(void)cmpxchg(module_param, cur, repl);
		cur = repl;
	}

	return cur;
}
|
|
|
|
|
2013-09-13 06:06:12 +08:00
|
|
|
unsigned dm_get_reserved_bio_based_ios(void)
|
|
|
|
{
|
2015-02-28 11:25:26 +08:00
|
|
|
return __dm_get_module_param(&reserved_bio_based_ios,
|
2016-05-13 04:28:10 +08:00
|
|
|
RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
|
2013-09-13 06:06:12 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
|
|
|
|
|
2016-02-23 01:16:21 +08:00
|
|
|
static unsigned dm_get_numa_node(void)
|
|
|
|
{
|
|
|
|
return __dm_get_module_param_int(&dm_numa_node,
|
|
|
|
DM_NUMA_NODE, num_online_nodes() - 1);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Module-local initialisation: uevent support, the deferred-remove
 * workqueue, and the block major number.  On failure, everything set up
 * so far is torn down in reverse order via the goto ladder.
 */
static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	/* Ordered, unbound workqueue for deferred device removal. */
	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	/* major == 0 asked for dynamic allocation; record what we got. */
	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}
|
|
|
|
|
|
|
|
/*
 * Undo local_init() in reverse order.  The workqueue is flushed and
 * destroyed first so no deferred-remove work runs after the blkdev
 * major is unregistered.
 */
static void local_exit(void)
{
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}
|
|
|
|
|
2008-02-08 10:09:51 +08:00
|
|
|
/*
 * Subsystem initialisers run in order by dm_init().  Must stay
 * index-aligned with _exits[] so partial-failure unwinding and
 * module exit tear down exactly what was set up.
 */
static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};
|
|
|
|
|
2008-02-08 10:09:51 +08:00
|
|
|
/*
 * Subsystem teardown functions; dm_exit() runs them in reverse order.
 * Must stay index-aligned with _inits[].
 */
static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};
|
|
|
|
|
|
|
|
/*
 * Module entry point: run every subsystem initialiser in order.
 * If one fails, unwind the ones that already succeeded (in reverse)
 * and propagate its error code.
 */
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int i;

	for (i = 0; i < count; i++) {
		int r = _inits[i]();

		if (r) {
			/* Tear down everything initialised so far. */
			while (i--)
				_exits[i]();
			return r;
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Module exit: run every subsystem teardown in reverse init order,
 * then destroy the minor IDR (which must be empty by now).
 */
static void __exit dm_exit(void)
{
	int n = ARRAY_SIZE(_exits);

	while (n > 0)
		_exits[--n]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Block device functions
|
|
|
|
*/
|
2009-12-11 07:52:20 +08:00
|
|
|
/* Return non-zero if @md has been marked for deletion (DMF_DELETING set). */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}
|
|
|
|
|
2008-03-02 23:29:31 +08:00
|
|
|
/*
 * Block device ->open method.  Takes a reference on the mapped_device and
 * bumps its open count, unless the device is being freed or deleted.
 * _minor_lock guards against racing with device teardown.
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	/* Refuse opens while the device is being freed or deleted. */
	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
|
|
|
|
|
2013-05-06 09:52:57 +08:00
|
|
|
/*
 * Block device ->release method.  Drops the open count; if this was the
 * last opener and removal was deferred (DMF_DEFERRED_REMOVE), schedules
 * the deferred-remove work before dropping the dm_get() reference taken
 * in dm_blk_open().
 */
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}
|
|
|
|
|
2006-06-26 15:27:34 +08:00
|
|
|
/* Current number of openers of @md's block device. */
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Guarantees nothing is using the device before it's deleted.
|
|
|
|
*/
|
2013-11-02 06:27:41 +08:00
|
|
|
/*
 * Atomically (under _minor_lock) decide whether @md may be deleted:
 *   - still open              -> -EBUSY (optionally mark DMF_DEFERRED_REMOVE
 *                                so the last close triggers removal);
 *   - @only_deferred set but no deferred removal pending -> -EEXIST;
 *   - otherwise               -> mark DMF_DELETING and return 0.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
|
|
|
|
|
2013-11-02 06:27:41 +08:00
|
|
|
/*
 * Cancel a pending deferred removal of @md.  Fails with -EBUSY if deletion
 * has already been committed (DMF_DELETING).
 */
int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
|
|
|
|
|
|
|
|
/* Workqueue callback: perform removals deferred by dm_blk_close(). */
static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}
|
|
|
|
|
2013-08-16 22:54:23 +08:00
|
|
|
/* Device capacity in sectors, taken from the gendisk. */
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}
|
|
|
|
|
2014-02-28 22:33:43 +08:00
|
|
|
/* Return the request_queue backing @md. */
struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}
|
|
|
|
|
2013-08-16 22:54:23 +08:00
|
|
|
/* Return the dm-statistics state embedded in @md. */
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}
|
|
|
|
|
2006-03-27 17:17:54 +08:00
|
|
|
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
|
|
|
{
|
|
|
|
struct mapped_device *md = bdev->bd_disk->private_data;
|
|
|
|
|
|
|
|
return dm_get_geometry(md, geo);
|
|
|
|
}
|
|
|
|
|
2018-10-12 18:08:49 +08:00
|
|
|
/*
 * Block device ->report_zones method.  Looks up the target covering
 * @sector in the live table (under SRCU) and delegates zone reporting to
 * it.  Returns -EAGAIN while suspended, -EIO when no table/target/method
 * is available, -ENOTSUPP when zoned support is compiled out.
 */
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct mapped_device *md = disk->private_data;
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, ret;

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map) {
		ret = -EIO;
		goto out;
	}

	tgt = dm_table_find_target(map, sector);
	if (!tgt) {
		ret = -EIO;
		goto out;
	}

	/*
	 * If we are executing this, we already know that the block device
	 * is a zoned device and so each target should have support for that
	 * type of drive. A missing report_zones method means that the target
	 * driver has a problem.
	 */
	if (WARN_ON(!tgt->type->report_zones)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * blkdev_report_zones() will loop and call this again to cover all the
	 * zones of the target, eventually moving on to the next target.
	 * So there is no need to loop here trying to fill the entire array
	 * of zones.
	 */
	ret = tgt->type->report_zones(tgt, sector, zones, nr_zones);

out:
	dm_put_live_table(md, srcu_idx);
	return ret;
#else
	return -ENOTSUPP;
#endif
}
|
|
|
|
|
2018-04-04 03:05:12 +08:00
|
|
|
/*
 * Resolve the block device an ioctl should be forwarded to.  Takes the
 * live table under SRCU (released later by dm_unprepare_ioctl(), or here
 * on the -ENOTCONN retry path) and asks the single target's prepare_ioctl
 * hook to pick *bdev.  Returns -ENOTTY when there is no usable single
 * target, -EAGAIN while suspended, otherwise the hook's result.
 */
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		/* Target not connected yet: drop the table lock and retry. */
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}
|
|
|
|
|
2018-04-04 03:05:12 +08:00
|
|
|
/* Release the SRCU table reference taken by dm_prepare_ioctl(). */
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}
|
|
|
|
|
2015-10-15 20:10:50 +08:00
|
|
|
/*
 * Block device ->ioctl method.  dm_prepare_ioctl() picks the bdev to
 * forward to; a positive return means the ioctl targets only a subset of
 * the parent bdev, which requires CAP_SYS_RAWIO.  The ioctl is then passed
 * to the underlying driver.
 */
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
|
|
|
|
|
2017-12-10 04:16:42 +08:00
|
|
|
static void start_io_acct(struct dm_io *io);
|
|
|
|
|
|
|
|
/*
 * Allocate a dm_io for @bio from md->io_bs.  The dm_io, its embedded
 * dm_target_io and that tio's clone bio live in one allocation: the bio
 * is allocated from the bioset and the containing structures are reached
 * via container_of().  Also starts I/O accounting.  Returns NULL on
 * allocation failure.
 */
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	/* tio->io == NULL marks the embedded tio as unused; see alloc_tio(). */
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}
|
|
|
|
|
2007-07-13 00:26:32 +08:00
|
|
|
/*
 * Free a dm_io.  The dm_io is embedded around io->tio.clone (see
 * alloc_io()), so dropping that bio releases the whole allocation.
 * @md is unused here but kept for symmetry with alloc_io().
 */
static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}
|
|
|
|
|
|
|
|
/*
 * Allocate a dm_target_io for target @ti.  The first tio per dm_io reuses
 * the one embedded in ci->io (detected by tio.io still being NULL);
 * subsequent ones are allocated from md->bs and flagged with
 * inside_dm_io = false so free_tio() knows to bio_put() them.
 * Returns NULL only when the bioset allocation fails.
 */
static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}
|
|
|
|
|
2016-04-12 00:05:38 +08:00
|
|
|
static void free_tio(struct dm_target_io *tio)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2017-12-12 12:17:47 +08:00
|
|
|
if (tio->inside_dm_io)
|
|
|
|
return;
|
2012-10-13 04:02:15 +08:00
|
|
|
bio_put(&tio->clone);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2018-12-11 22:10:26 +08:00
|
|
|
/*
 * True if any bio-based I/O is in flight on @md, computed by summing the
 * per-CPU read/write in_flight counters of the disk's part0.
 */
static bool md_in_flight_bios(struct mapped_device *md)
{
	int cpu;
	struct hd_struct *part = &dm_disk(md)->part0;
	long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}

	return sum != 0;
}
|
|
|
|
|
2018-12-11 22:10:26 +08:00
|
|
|
static bool md_in_flight(struct mapped_device *md)
|
|
|
|
{
|
|
|
|
if (queue_is_mq(md->queue))
|
2018-12-18 12:11:17 +08:00
|
|
|
return blk_mq_queue_inflight(md->queue);
|
2018-12-11 22:10:26 +08:00
|
|
|
else
|
|
|
|
return md_in_flight_bios(md);
|
2009-12-11 07:52:13 +08:00
|
|
|
}
|
|
|
|
|
2006-02-01 19:04:53 +08:00
|
|
|
/*
 * Begin block-layer and (if enabled) dm-stats accounting for io->orig_bio,
 * recording the start time in io->start_time.
 */
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = jiffies;

	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
			      &dm_disk(md)->part0);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}
|
|
|
|
|
2024-06-12 13:13:20 +08:00
|
|
|
/*
 * Finish accounting for @bio: dm-stats (if enabled), generic block-layer
 * accounting, an optional blkcg time charge, then wake any suspend waiter
 * sleeping on md->wait.
 */
static void end_io_acct(struct mapped_device *md, struct bio *bio,
			unsigned long start_time, struct dm_stats_aux *stats_aux)
{
	unsigned long duration = jiffies - start_time;
#ifdef CONFIG_BLK_CGROUP
	struct blkcg *blkcg = bio_blkcg(bio);
	int rw = bio_data_dir(bio), cpu;
#endif

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, stats_aux);

	/*
	 * NOTE(review): barrier with no comment in the original — presumably
	 * orders the stats updates before the wait-queue check below; confirm.
	 */
	smp_wmb();

	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
			    start_time);

#ifdef CONFIG_BLK_CGROUP
	/* Charge the I/O duration (in ns) to the bio's blkcg. */
	cpu = part_stat_lock();
	blkcg_part_stat_add(blkcg, cpu, &dm_disk(md)->part0, nsecs[rw],
			    jiffies_to_nsecs(duration));
	part_stat_unlock();
#endif

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Add the bio to the list of deferred io.
|
|
|
|
*/
|
2009-04-09 07:27:15 +08:00
|
|
|
/*
 * Append @bio to md->deferred (irq-safe, since completion paths may run in
 * interrupt context) and kick the worker to process it.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Everyone (including functions in this file), should use this
|
|
|
|
* function to access the md->map field, and make sure they call
|
2013-07-11 06:41:18 +08:00
|
|
|
* dm_put_live_table() when finished.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2013-07-11 06:41:18 +08:00
|
|
|
/*
 * Take an SRCU read lock on md->io_barrier and return the live table.
 * *srcu_idx must later be handed back to dm_put_live_table().
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-07-11 06:41:18 +08:00
|
|
|
/* Release the SRCU read lock taken by dm_get_live_table(). */
void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}
|
|
|
|
|
|
|
|
/* Wait for all SRCU and RCU readers of md->map to drain. */
void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A fast alternative to dm_get_live_table/dm_put_live_table.
|
|
|
|
* The caller must not block between these two functions.
|
|
|
|
*/
|
|
|
|
/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-07-11 06:41:18 +08:00
|
|
|
/* Release the RCU read lock taken by dm_get_live_table_fast(). */
static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
|
|
|
|
|
2018-04-04 03:05:12 +08:00
|
|
|
/* Holder cookie passed to blkdev_get_by_dev() for exclusive opens. */
static char *_dm_claim_ptr = "I belong to device-mapper";
|
|
|
|
|
2014-08-14 02:53:43 +08:00
|
|
|
/*
|
|
|
|
* Open a table device so we can use it as a map destination.
|
|
|
|
*/
|
|
|
|
/*
 * Open a table device so we can use it as a map destination.
 * Opens @dev exclusively (holder: _dm_claim_ptr), links it as a holder of
 * md's disk, and looks up its DAX device.  On holder-link failure the
 * bdev reference is dropped before returning the error.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Close a table device that we've been using.
|
|
|
|
*/
|
|
|
|
/*
 * Close a table device that we've been using: undo the holder link, drop
 * the exclusive bdev reference and the DAX device, then clear the fields
 * so a double close is a no-op.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}
|
|
|
|
|
|
|
|
/*
 * Find an already-open table device on list @l matching @dev and @mode,
 * or NULL.  Caller must hold md->table_devices_lock.
 */
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * Get (or open) the table device for @dev/@mode under
 * md->table_devices_lock.  An existing entry just gains a reference;
 * otherwise a new table_device is allocated, opened, named, given an
 * initial refcount of 1 and added to md->table_devices.  On success
 * *result points at the embedded dm_dev; pair with dm_put_table_device().
 */
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);
|
|
|
|
|
|
|
|
/*
 * Drop a reference taken by dm_get_table_device(); on the last reference
 * the table device is closed, unlinked and freed (all under
 * md->table_devices_lock).
 */
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);
|
|
|
|
|
|
|
|
/*
 * Destroy-time cleanup: free any table devices still on @devices, warning
 * about each leaked reference.
 */
static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}
|
|
|
|
|
2006-03-27 17:17:54 +08:00
|
|
|
/*
|
|
|
|
* Get the geometry associated with a dm device
|
|
|
|
*/
|
|
|
|
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the geometry of a device.
|
|
|
|
*/
|
|
|
|
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
|
|
|
|
{
|
|
|
|
sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
|
|
|
|
|
|
|
|
if (geo->start > sz) {
|
|
|
|
DMWARN("Start sector is beyond the geometry limits.");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
md->geometry = *geo;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supercedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
/* Non-zero while a noflush suspend is in progress (DMF_NOFLUSH_SUSPENDING). */
static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Decrements the number of outstanding ios that a bio has been
|
|
|
|
* cloned into, completing the original io if necc.
|
|
|
|
*/
|
2017-06-03 15:38:06 +08:00
|
|
|
static void dec_pending(struct dm_io *io, blk_status_t error)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supercedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
unsigned long flags;
|
2017-06-03 15:38:06 +08:00
|
|
|
blk_status_t io_error;
|
2009-03-17 01:44:36 +08:00
|
|
|
struct bio *bio;
|
|
|
|
struct mapped_device *md = io->md;
|
2024-06-12 13:13:20 +08:00
|
|
|
unsigned long start_time = 0;
|
|
|
|
struct dm_stats_aux stats_aux;
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supersedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
|
|
|
|
/* Push-back supersedes any I/O errors */
|
2009-10-17 06:18:15 +08:00
|
|
|
if (unlikely(error)) {
|
|
|
|
spin_lock_irqsave(&io->endio_lock, flags);
|
2017-12-12 09:51:50 +08:00
|
|
|
if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
|
2017-06-03 15:38:06 +08:00
|
|
|
io->status = error;
|
2009-10-17 06:18:15 +08:00
|
|
|
spin_unlock_irqrestore(&io->endio_lock, flags);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (atomic_dec_and_test(&io->io_count)) {
|
2017-06-03 15:38:06 +08:00
|
|
|
if (io->status == BLK_STS_DM_REQUEUE) {
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supersedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
/*
|
|
|
|
* Target requested pushing back the I/O.
|
|
|
|
*/
|
2009-04-03 02:55:39 +08:00
|
|
|
spin_lock_irqsave(&md->deferred_lock, flags);
|
2010-09-09 00:07:00 +08:00
|
|
|
if (__noflush_suspending(md))
|
2017-12-12 09:51:50 +08:00
|
|
|
/* NOTE early return due to BLK_STS_DM_REQUEUE below */
|
|
|
|
bio_list_add_head(&md->deferred, io->orig_bio);
|
2010-09-09 00:07:00 +08:00
|
|
|
else
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supersedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
/* noflush suspend was interrupted. */
|
2017-06-03 15:38:06 +08:00
|
|
|
io->status = BLK_STS_IOERR;
|
2009-04-03 02:55:39 +08:00
|
|
|
spin_unlock_irqrestore(&md->deferred_lock, flags);
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supersedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
}
|
|
|
|
|
2017-06-03 15:38:06 +08:00
|
|
|
io_error = io->status;
|
2017-12-12 09:51:50 +08:00
|
|
|
bio = io->orig_bio;
|
2024-06-12 13:13:20 +08:00
|
|
|
start_time = io->start_time;
|
|
|
|
stats_aux = io->stats_aux;
|
2010-09-09 00:07:00 +08:00
|
|
|
free_io(md, io);
|
2024-06-12 13:13:20 +08:00
|
|
|
end_io_acct(md, bio, start_time, &stats_aux);
|
2010-09-09 00:07:00 +08:00
|
|
|
|
2017-06-03 15:38:06 +08:00
|
|
|
if (io_error == BLK_STS_DM_REQUEUE)
|
2010-09-09 00:07:00 +08:00
|
|
|
return;
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supersedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
|
2016-08-06 05:35:16 +08:00
|
|
|
if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
|
2009-04-09 07:27:16 +08:00
|
|
|
/*
|
2010-09-09 00:07:00 +08:00
|
|
|
* Preflush done for flush with data, reissue
|
2016-06-06 03:32:25 +08:00
|
|
|
* without REQ_PREFLUSH.
|
2009-04-09 07:27:16 +08:00
|
|
|
*/
|
2016-08-06 05:35:16 +08:00
|
|
|
bio->bi_opf &= ~REQ_PREFLUSH;
|
2010-09-09 00:07:00 +08:00
|
|
|
queue_io(md, bio);
|
2009-04-09 07:27:16 +08:00
|
|
|
} else {
|
2010-09-09 00:07:01 +08:00
|
|
|
/* done with normal IO or empty flush */
|
2018-02-15 17:00:15 +08:00
|
|
|
if (io_error)
|
|
|
|
bio->bi_status = io_error;
|
2015-07-20 21:29:37 +08:00
|
|
|
bio_endio(bio);
|
2009-03-17 01:44:36 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
dm: disable DISCARD if the underlying storage no longer supports it
Some storage devices report support for discard commands like
WRITE_SAME_16 with unmap, but reject discard commands sent to the
storage device. This is a clear storage firmware bug but it doesn't
change the fact that should a program cause discards to be sent to a
multipath device layered on this buggy storage, all paths can end up
failed at the same time from the discards, causing possible I/O loss.
The first discard to a path will fail with Illegal Request, Invalid
field in cdb, e.g.:
kernel: sd 8:0:8:19: [sdfn] tag#0 FAILED Result: hostbyte=DID_OK driverbyte=DRIVER_SENSE
kernel: sd 8:0:8:19: [sdfn] tag#0 Sense Key : Illegal Request [current]
kernel: sd 8:0:8:19: [sdfn] tag#0 Add. Sense: Invalid field in cdb
kernel: sd 8:0:8:19: [sdfn] tag#0 CDB: Write same(16) 93 08 00 00 00 00 00 a0 08 00 00 00 80 00 00 00
kernel: blk_update_request: critical target error, dev sdfn, sector 10487808
The SCSI layer converts this to the BLK_STS_TARGET error number, the sd
device disables its support for discard on this path, and because of the
BLK_STS_TARGET error multipath fails the discard without failing any
path or retrying down a different path. But subsequent discards can
cause path failures. Any discards sent to the path which already failed
a discard ends up failing with EIO from blk_cloned_rq_check_limits with
an "over max size limit" error since the discard limit was set to 0 by
the sd driver for the path. As the error is EIO, this now fails the
path and multipath tries to send the discard down the next path. This
cycle continues as discards are sent until all paths fail.
Fix this by training DM core to disable DISCARD if the underlying
storage already did so.
Also, fix branching in dm_done() and clone_endio() to reflect the
mutually exclusive nature of the IO operations in question.
Cc: stable@vger.kernel.org
Reported-by: David Jeffery <djeffery@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
2019-04-04 00:23:11 +08:00
|
|
|
/*
 * Stop issuing DISCARD to this mapped device: zero the discard limit and
 * clear the queue flag so upper layers stop generating discard bios.
 * Called when the underlying storage rejected a discard (see clone_endio).
 */
void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}
|
|
|
|
|
2016-05-13 04:28:10 +08:00
|
|
|
/*
 * Stop issuing WRITE SAME to this mapped device by zeroing its limit.
 * Called when the underlying storage rejected the operation (clone_endio).
 */
void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}
|
|
|
|
|
2017-04-06 01:21:05 +08:00
|
|
|
/*
 * Stop issuing WRITE ZEROES to this mapped device by zeroing its limit.
 * Called when the underlying storage rejected the operation (clone_endio).
 */
void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
|
|
|
|
{
|
|
|
|
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
|
|
|
|
}
|
|
|
|
|
2015-07-20 21:29:37 +08:00
|
|
|
/*
 * Completion handler for every cloned bio that DM submits to a target.
 * Handles three concerns in order:
 *  1. BLK_STS_TARGET from the lower device means the op type (DISCARD,
 *     WRITE SAME, WRITE ZEROES) is no longer supported there; mirror that
 *     by disabling the op on this mapped device so multipath doesn't fail
 *     paths over it.
 *  2. Give the target's end_io hook a chance to remap/complete the error.
 *  3. Release the swap-bios throttle (if taken in __map_bio), free the
 *     clone and drop the io reference taken at map time.
 */
static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET)) {
		/*
		 * A zero limit means the lower-level driver already turned
		 * the op off; the op types are mutually exclusive, hence
		 * the else-if chain.
		 */
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/* fall through */
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/* Pair with the down() taken for swap bios in __map_bio(). */
	if (unlikely(swap_bios_limit(tio->ti, bio))) {
		struct mapped_device *md = io->md;
		up(&md->swap_bios_semaphore);
	}

	free_tio(tio);
	dec_pending(io, error);
}
|
|
|
|
|
2010-08-12 11:14:10 +08:00
|
|
|
/*
|
|
|
|
* Return maximum size of I/O possible at the supplied sector up to the current
|
|
|
|
* target boundary.
|
|
|
|
*/
|
|
|
|
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
|
|
|
|
{
|
|
|
|
sector_t target_offset = dm_target_offset(ti, sector);
|
|
|
|
|
|
|
|
return ti->len - target_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Maximum number of sectors of a single I/O starting at @sector: the
 * distance to the target boundary, further clipped to the target's own
 * max_io_len granularity when one is set.
 */
static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		/*
		 * Power-of-two boundaries can be computed with a mask;
		 * otherwise fall back to a (slower) division.  Either way
		 * max_len first holds the offset within the current
		 * max_io_len-sized chunk, then the sectors left in it.
		 */
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}
|
|
|
|
|
2012-07-27 22:08:00 +08:00
|
|
|
/*
 * Set the per-target maximum I/O length.  @len is rejected if it does not
 * fit in the 32-bit ti->max_io_len field.  Returns 0 on success, -EINVAL
 * (with ti->error set) on overflow.
 */
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len <= UINT_MAX) {
		ti->max_io_len = (uint32_t) len;
		return 0;
	}

	DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
	      (unsigned long long)len, UINT_MAX);
	ti->error = "Maximum size of target IO is too large";
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
|
|
|
|
|
2017-04-13 03:35:44 +08:00
|
|
|
/*
 * Look up the live target covering @sector while holding the md SRCU
 * read lock.  On success the caller owns the SRCU read-side critical
 * section (hence __acquires) and must release it with
 * dm_put_live_table(md, *srcu_idx) — including on the NULL return paths,
 * since dm_get_live_table() has already entered the critical section.
 */
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
		sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}
|
2016-06-23 07:54:53 +08:00
|
|
|
|
2017-04-13 03:35:44 +08:00
|
|
|
/*
 * DAX direct_access entry point for the mapped device: translate the page
 * offset to a sector, find the live target covering it, clip nr_pages to
 * the target boundary and delegate to the target's direct_access hook.
 * Returns the number of pages accessible (via the hook) or -EIO when the
 * sector has no target or the target lacks direct_access support.
 */
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	/* Whole pages only: round the boundary-limited length down. */
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	/* Balances the SRCU lock taken in dm_dax_get_live_target(). */
	dm_put_live_table(md, srcu_idx);

	return ret;
}
|
|
|
|
|
2019-05-17 04:26:29 +08:00
|
|
|
/*
 * Report whether every device in the live table supports DAX at the
 * given block size.  Returns false when there is no live table.
 */
static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	bool ret = false;
	struct dm_table *map;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);

	/* dm_get_live_table() entered SRCU even when map is NULL. */
	dm_put_live_table(md, srcu_idx);

	return ret;
}
|
|
|
|
|
2017-05-30 03:57:56 +08:00
|
|
|
/*
 * DAX copy_from_iter for the mapped device: route the copy to the target
 * covering pgoff, falling back to a plain copy_from_iter() for targets
 * without a dax_copy_from_iter hook.  Returns bytes copied, or 0 when no
 * target covers the offset.
 */
static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		/* Target has no special handling; memory-copy directly. */
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}
|
|
|
|
|
2018-05-02 21:46:33 +08:00
|
|
|
/*
 * DAX copy_to_iter for the mapped device — mirror image of
 * dm_dax_copy_from_iter(): route to the covering target's hook or fall
 * back to a plain copy_to_iter().  Returns bytes copied, 0 when no
 * target covers the offset.
 */
static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		/* Target has no special handling; memory-copy directly. */
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}
|
|
|
|
|
2014-03-15 06:41:24 +08:00
|
|
|
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	/* Shrink both the shared length and this clone to n_sectors. */
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
|
|
|
|
|
2017-05-09 07:40:48 +08:00
|
|
|
/*
 * The zone descriptors obtained with a zone report indicate
 * zone positions within the underlying device of the target. The zone
 * descriptors must be remapped to match their position within the dm device.
 * The caller target should obtain the zones information using
 * blkdev_report_zones() to ensure that remapping for partition offset is
 * already handled.
 */
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
			  struct blk_zone *zones, unsigned int *nr_zones)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct blk_zone *zone;
	unsigned int nrz = *nr_zones;
	int i;

	/*
	 * Remap the start sector and write pointer position of the zones in
	 * the array. Since we may have obtained from the target underlying
	 * device more zones that the target size, also adjust the number
	 * of zones.
	 */
	for (i = 0; i < nrz; i++) {
		zone = zones + i;
		if (zone->start >= start + ti->len) {
			/* Past the end of the target: drop trailing zones. */
			memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
			break;
		}

		zone->start = zone->start + ti->begin - start;
		/* Conventional zones have no write pointer to adjust. */
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp = zone->wp + ti->begin - start;
	}

	*nr_zones = i;
#else /* !CONFIG_BLK_DEV_ZONED */
	*nr_zones = 0;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
/*
 * Resize the swap-bios throttling semaphore to the new limit @latch.
 * md->swap_bios tracks the semaphore's configured capacity; shrink it by
 * down()ing (waiting for in-flight swap bios to complete) or grow it by
 * up()ing, one unit at a time under md->swap_bios_lock.  noinline keeps
 * this slow path out of the hot __map_bio() caller.
 */
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}
|
|
|
|
|
2017-12-10 04:16:42 +08:00
|
|
|
/*
 * Hand a cloned bio to its target's map function and dispatch/complete it
 * according to the result.  Takes an io_count reference that is dropped by
 * clone_endio() (SUBMITTED/REMAPPED) or by dec_pending() here
 * (KILL/REQUEUE).  Swap bios may additionally be throttled through
 * md->swap_bios_semaphore; the semaphore is released either here on the
 * error paths or in clone_endio() on completion.
 */
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;
	struct dm_target *ti = tio->ti;
	blk_qc_t ret = BLK_QC_T_NONE;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&io->io_count);
	/* Save the sector before ->map() may remap it, for tracing. */
	sector = clone->bi_iter.bi_sector;

	if (unlikely(swap_bios_limit(ti, clone))) {
		int latch = get_swap_bios();
		/* Pick up a changed global limit before throttling. */
		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

	r = ti->type->map(ti, clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone->bi_disk->queue, clone,
				      bio_dev(io->orig_bio), sector);
		ret = generic_make_request(clone);
		break;
	case DM_MAPIO_KILL:
		if (unlikely(swap_bios_limit(ti, clone)))
			up(&md->swap_bios_semaphore);
		free_tio(tio);
		dec_pending(io, BLK_STS_IOERR);
		break;
	case DM_MAPIO_REQUEUE:
		if (unlikely(swap_bios_limit(ti, clone)))
			up(&md->swap_bios_semaphore);
		free_tio(tio);
		dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return ret;
}
|
|
|
|
|
2014-03-15 06:40:39 +08:00
|
|
|
/* Point @bio at @sector with a size of @len sectors (converted to bytes). */
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}
|
|
|
|
|
|
|
|
/*
 * Creates a bio that consists of range of complete bvecs.
 *
 * Fast-clones @bio into tio->clone, duplicates integrity metadata when
 * present (rejecting targets that neither generate nor pass through
 * integrity data), then trims the clone to [sector, sector + len).
 * Returns 0 on success, -EIO or the bio_integrity_clone() error code.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
		     sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	if (bio_integrity(bio)) {
		int r;

		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
			     !dm_target_passes_integrity(tio->ti->type))) {
			DMWARN("%s: the target %s doesn't support integrity data.",
				dm_device_name(tio->io->md),
				tio->ti->type->name);
			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
		if (r < 0)
			return r;
	}

	/* Skip region 1 (already processed) and clip to len sectors. */
	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	/* Keep integrity metadata in sync with the trimmed data range. */
	if (bio_integrity(bio))
		bio_integrity_trim(clone);

	return 0;
}
|
|
|
|
|
2017-11-23 03:56:12 +08:00
|
|
|
/*
 * Allocate @num_bios clone tios onto @blist, all-or-nothing.
 *
 * A single bio is allocated directly with GFP_NOIO.  For multiple bios,
 * try GFP_NOWAIT first; if that fails, free the partial set and retry
 * with GFP_NOIO while holding table_devices_lock — the lock serializes
 * would-be sleepers so concurrent callers cannot deadlock each other on
 * the shared bioset (see alloc_tio callers).
 */
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios)
{
	struct dm_target_io *tio;
	int try;

	if (!num_bios)
		return;

	if (num_bios == 1) {
		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
		bio_list_add(blist, &tio->clone);
		return;
	}

	for (try = 0; try < 2; try++) {
		int bio_nr;
		struct bio *bio;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
			if (!tio)
				break;

			bio_list_add(blist, &tio->clone);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		/* Partial allocation: release everything and retry. */
		while ((bio = bio_list_pop(blist))) {
			tio = container_of(bio, struct dm_target_io, clone);
			free_tio(tio);
		}
	}
}
|
|
|
|
|
2017-12-10 04:16:42 +08:00
|
|
|
/*
 * Clone ci->bio without splitting (fast clone shares the bvec table),
 * optionally repositioning/resizing it when @len is given, then map it.
 * Used for flushes and duplicated per-target bios.
 */
static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
					   struct dm_target_io *tio, unsigned *len)
{
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	return __map_bio(tio);
}
|
|
|
|
|
2013-03-02 06:45:47 +08:00
|
|
|
/*
 * Send @num_bios identical copies of ci->bio to @ti.  All clones are
 * allocated up front (all-or-nothing, see alloc_multiple_bios) before any
 * is mapped, so a mid-stream allocation failure cannot leave a partial
 * set in flight.
 */
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);

	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
		(void) __clone_and_map_simple_bio(ci, tio, len);
	}
}
|
|
|
|
|
2013-03-02 06:45:47 +08:00
|
|
|
/*
 * Send an empty (data-less) flush to every target in the live table,
 * duplicated per-target according to ti->num_flush_bios.  Always
 * returns 0.
 */
static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	/*
	 * Empty flush uses a statically initialized bio, as the base for
	 * cloning.  However, blkg association requires that a bdev is
	 * associated with a gendisk, which doesn't happen until the bdev is
	 * opened.  So, blkg association is done at issue time of the flush
	 * rather than when the device is created in alloc_dev().
	 */
	bio_set_dev(ci->bio, ci->io->md->bdev);

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
	return 0;
}
|
|
|
|
|
2016-03-03 01:33:03 +08:00
|
|
|
/*
 * Clone @len sectors of ci->bio starting at @sector (a real data clone,
 * including integrity metadata — see clone_bio) and map it to @ti.
 * Returns 0 on success or the clone_bio() error after freeing the tio.
 */
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	int r;

	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
	tio->len_ptr = len;
	r = clone_bio(tio, bio, sector, *len);
	if (r < 0) {
		free_tio(tio);
		return r;
	}
	/* Result intentionally ignored; completion is via clone_endio. */
	(void) __map_bio(tio);

	return 0;
}
|
|
|
|
|
2013-03-02 06:45:47 +08:00
|
|
|
/* Per-op accessors for how many duplicate bios a target wants. */
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_secure_erase_bios(struct dm_target *ti)
{
	return ti->num_secure_erase_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
	return ti->num_write_zeroes_bios;
}
2017-12-09 04:02:11 +08:00
|
|
|
/*
 * Issue an "abnormal" extent-changing op (discard, secure erase, write
 * same/zeroes) as @num_bios duplicates covering up to the target
 * boundary, then advance the clone_info cursor.  Returns -EOPNOTSUPP
 * when the target requested zero bios for this op, 0 otherwise.
 */
static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
				       unsigned num_bios)
{
	unsigned len;

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (!num_bios)
		return -EOPNOTSUPP;

	len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));

	__send_duplicate_bios(ci, ti, num_bios, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}
|
|
|
|
|
2017-12-09 04:02:11 +08:00
|
|
|
/* Thin per-op wrappers around __send_changing_extent_only(). */

static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
}

static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
}

static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
}

static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
}
|
|
|
|
|
2019-01-19 03:10:37 +08:00
|
|
|
static bool is_abnormal_io(struct bio *bio)
|
|
|
|
{
|
|
|
|
bool r = false;
|
|
|
|
|
|
|
|
switch (bio_op(bio)) {
|
|
|
|
case REQ_OP_DISCARD:
|
|
|
|
case REQ_OP_SECURE_ERASE:
|
|
|
|
case REQ_OP_WRITE_SAME:
|
|
|
|
case REQ_OP_WRITE_ZEROES:
|
|
|
|
r = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2018-03-26 23:49:16 +08:00
|
|
|
static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
|
|
|
|
int *result)
|
|
|
|
{
|
|
|
|
struct bio *bio = ci->bio;
|
|
|
|
|
|
|
|
if (bio_op(bio) == REQ_OP_DISCARD)
|
|
|
|
*result = __send_discard(ci, ti);
|
2018-03-13 17:23:45 +08:00
|
|
|
else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
|
|
|
|
*result = __send_secure_erase(ci, ti);
|
2018-03-26 23:49:16 +08:00
|
|
|
else if (bio_op(bio) == REQ_OP_WRITE_SAME)
|
|
|
|
*result = __send_write_same(ci, ti);
|
|
|
|
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
|
|
|
|
*result = __send_write_zeroes(ci, ti);
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-03-02 06:45:47 +08:00
|
|
|
/*
 * Select the correct strategy for processing a non-flush bio.
 *
 * Maps the extent starting at ci->sector to its target, handles the
 * abnormal ops specially, and otherwise clones and maps a data bio for
 * as much of the remaining range as the target accepts.  Advances
 * ci->sector / ci->sector_count on success so the caller can loop.
 * Returns 0 on success, -EIO if no target covers the sector, or the
 * error from the underlying send/clone helper.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct dm_target *ti;
	unsigned len;
	int r;

	/* No target covering this sector means a hole in the table. */
	ti = dm_table_find_target(ci->map, ci->sector);
	if (!ti)
		return -EIO;

	/* Discard/secure-erase/write-same/write-zeroes take their own path. */
	if (__process_abnormal_io(ci, ti, &r))
		return r;

	/* Clamp the clone to what this target can take in one bio. */
	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	/* Advance past the portion just cloned; caller loops on the rest. */
	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}
|
|
|
|
|
2017-12-10 04:16:42 +08:00
|
|
|
static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
|
|
|
|
struct dm_table *map, struct bio *bio)
|
|
|
|
{
|
|
|
|
ci->map = map;
|
|
|
|
ci->io = alloc_io(md, bio);
|
|
|
|
ci->sector = bio->bi_iter.bi_sector;
|
|
|
|
}
|
|
|
|
|
2019-01-17 23:48:01 +08:00
|
|
|
/*
 * Subtract @subnd from a disk-stats field of @part; used below to undo
 * the accounting for the not-yet-processed remainder when a bio is
 * split.  NOTE(review): only used under part_stat_lock()/unlock() —
 * callers should keep it that way.
 */
#define __dm_part_stat_sub(part, field, subnd) \
	(part_stat_get(part, field) -= (subnd))
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Entry point to split a bio into clones and submit them to the targets.
 *
 * Three cases: an empty-flush (REQ_PREFLUSH) is cloned from an on-stack
 * bio and sent to every target; a zone reset is processed once with
 * sector_count 0; everything else is split target-by-target in a loop,
 * handing any remainder back to generic_make_request() when called from
 * within a make_request context.  Returns the cookie from resubmitting
 * the remainder, or BLK_QC_T_NONE.
 */
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
					struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		bio_uninit(ci.bio);
		/* dec_pending submits any data associated with flush */
	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
		/* Zone reset carries no data payload: process exactly once. */
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error) {
			error = __split_and_process_non_flush(&ci);
			if (current->bio_list && ci.sector_count && !error) {
				/*
				 * Remainder must be passed to generic_make_request()
				 * so that it gets handled *after* bios already submitted
				 * have been completely processed.
				 * We take a clone of the original to store in
				 * ci.io->orig_bio to be used by end_io_acct() and
				 * for dec_pending to use for completion handling.
				 */
				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
							  GFP_NOIO, &md->queue->bio_split);
				ci.io->orig_bio = b;

				/*
				 * Adjust IO stats for each split, otherwise upon queue
				 * reentry there will be redundant IO accounting.
				 * NOTE: this is a stop-gap fix, a proper fix involves
				 * significant refactoring of DM core's bio splitting
				 * (by eliminating DM's splitting and just using bio_split)
				 */
				part_stat_lock();
				__dm_part_stat_sub(&dm_disk(md)->part0,
						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
				part_stat_unlock();

				/* Chain so @bio's completion waits on @b's. */
				bio_chain(b, bio);
				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
				ret = generic_make_request(bio);
				break;
			}
		}
	}

	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}
|
|
|
|
|
2019-01-18 03:33:01 +08:00
|
|
|
/*
 * Validate that @bio can be mapped by @map (or the immutable target)
 * and feed it to __split_and_process_bio(), pre-splitting abnormal IO
 * against the queue limits when running inside a make_request context.
 */
static blk_qc_t dm_process_bio(struct mapped_device *md,
			       struct dm_table *map, struct bio *bio)
{
	blk_qc_t ret = BLK_QC_T_NONE;
	struct dm_target *ti = md->immutable_target;

	/* No table loaded: nothing can map this bio. */
	if (unlikely(!map)) {
		bio_io_error(bio);
		return ret;
	}

	/*
	 * Without an immutable target, look up the target for this
	 * sector; no target covering it means the bio cannot be mapped.
	 */
	if (!ti) {
		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
		if (unlikely(!ti)) {
			bio_io_error(bio);
			return ret;
		}
	}

	/*
	 * If in ->make_request_fn we need to use blk_queue_split(), otherwise
	 * queue_limits for abnormal requests (e.g. discard, writesame, etc)
	 * won't be imposed.
	 */
	if (current->bio_list) {
		if (is_abnormal_io(bio))
			blk_queue_split(md->queue, &bio);
		/* regular IO is split by __split_and_process_bio */
	}

	return __split_and_process_bio(md, map, bio);
}
|
|
|
|
|
2018-11-07 05:34:59 +08:00
|
|
|
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
pref_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
{
|
|
|
|
struct mapped_device *md = q->queuedata;
|
2017-12-10 04:16:42 +08:00
|
|
|
blk_qc_t ret = BLK_QC_T_NONE;
|
2013-07-11 06:41:18 +08:00
|
|
|
int srcu_idx;
|
|
|
|
struct dm_table *map;
|
2024-06-12 13:13:20 +08:00
|
|
|
#ifdef CONFIG_BLK_CGROUP
|
|
|
|
struct blkcg *blkcg = bio_blkcg(bio);
|
2024-06-11 20:08:33 +08:00
|
|
|
int rw = bio_data_dir(bio), cpu;
|
2024-06-12 13:13:20 +08:00
|
|
|
#endif
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
prep_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
|
2013-07-11 06:41:18 +08:00
|
|
|
map = dm_get_live_table(md, &srcu_idx);
|
2010-09-09 00:07:00 +08:00
|
|
|
|
2024-06-12 13:13:20 +08:00
|
|
|
#ifdef CONFIG_BLK_CGROUP
|
2024-06-11 20:08:33 +08:00
|
|
|
cpu = part_stat_lock();
|
|
|
|
blkcg_part_stat_inc(blkcg, cpu, &dm_disk(md)->part0, ios[rw]);
|
|
|
|
blkcg_part_stat_add(blkcg, cpu, &dm_disk(md)->part0, sectors[rw],
|
|
|
|
bio_sectors(bio));
|
|
|
|
part_stat_unlock();
|
2024-06-12 13:13:20 +08:00
|
|
|
#endif
|
2024-06-11 20:08:33 +08:00
|
|
|
|
2010-09-09 00:07:00 +08:00
|
|
|
/* if we're suspended, we have to queue this io for later */
|
|
|
|
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
|
2013-07-11 06:41:18 +08:00
|
|
|
dm_put_live_table(md, srcu_idx);
|
2010-02-17 02:43:01 +08:00
|
|
|
|
2016-08-06 05:35:16 +08:00
|
|
|
if (!(bio->bi_opf & REQ_RAHEAD))
|
2010-09-09 00:07:00 +08:00
|
|
|
queue_io(md, bio);
|
|
|
|
else
|
2009-04-09 07:27:14 +08:00
|
|
|
bio_io_error(bio);
|
2017-12-10 04:16:42 +08:00
|
|
|
return ret;
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
prep_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-01-18 03:33:01 +08:00
|
|
|
ret = dm_process_bio(md, map, bio);
|
2017-12-10 04:16:42 +08:00
|
|
|
|
2013-07-11 06:41:18 +08:00
|
|
|
dm_put_live_table(md, srcu_idx);
|
2017-12-10 04:16:42 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int dm_any_congested(void *congested_data, int bdi_bits)
|
|
|
|
{
|
2008-11-14 07:39:14 +08:00
|
|
|
int r = bdi_bits;
|
|
|
|
struct mapped_device *md = congested_data;
|
|
|
|
struct dm_table *map;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-04-09 07:27:14 +08:00
|
|
|
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
|
2016-02-03 11:35:06 +08:00
|
|
|
if (dm_request_based(md)) {
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
pref_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
/*
|
2016-02-03 11:35:06 +08:00
|
|
|
* With request-based DM we only need to check the
|
|
|
|
* top-level queue for congestion.
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
pref_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
*/
|
2024-06-11 20:08:33 +08:00
|
|
|
struct backing_dev_info *bdi = md->queue->backing_dev_info;
|
|
|
|
r = bdi->wb.congested->state & bdi_bits;
|
2016-02-03 11:35:06 +08:00
|
|
|
} else {
|
|
|
|
map = dm_get_live_table_fast(md);
|
|
|
|
if (map)
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
pref_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
r = dm_table_any_congested(map, bdi_bits);
|
2016-02-03 11:35:06 +08:00
|
|
|
dm_put_live_table_fast(md);
|
2008-11-14 07:39:14 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*-----------------------------------------------------------------
|
|
|
|
* An IDR is used to keep track of allocated minor numbers.
|
|
|
|
*---------------------------------------------------------------*/
|
2006-06-26 15:27:32 +08:00
|
|
|
/*
 * Return an allocated minor number to the pool.
 * _minor_lock serializes this against concurrent allocation in
 * specific_minor()/next_free_minor() and lookups under the same lock.
 */
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* See if the device with a specific minor # is free.
|
|
|
|
*/
|
2008-04-25 05:10:59 +08:00
|
|
|
static int specific_minor(int minor)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-02-28 09:04:26 +08:00
|
|
|
int r;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (minor >= (1 << MINORBITS))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2013-02-28 09:04:26 +08:00
|
|
|
idr_preload(GFP_KERNEL);
|
2006-06-26 15:27:22 +08:00
|
|
|
spin_lock(&_minor_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-02-28 09:04:26 +08:00
|
|
|
r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-06-26 15:27:22 +08:00
|
|
|
spin_unlock(&_minor_lock);
|
2013-02-28 09:04:26 +08:00
|
|
|
idr_preload_end();
|
|
|
|
if (r < 0)
|
|
|
|
return r == -ENOSPC ? -EBUSY : r;
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2008-04-25 05:10:59 +08:00
|
|
|
/*
 * Allocate the lowest free minor number, returned through @minor.
 * Returns 0 on success or a negative errno from idr_alloc().
 *
 * The idr_preload()/GFP_NOWAIT pairing lets the IDR pre-allocate with
 * GFP_KERNEL outside the lock while the allocation itself stays atomic
 * under the _minor_lock spinlock.
 */
static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	/* Any id in [0, 1 << MINORBITS) will do. */
	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}
|
|
|
|
|
2009-09-22 08:01:13 +08:00
|
|
|
static const struct block_device_operations dm_blk_dops;
|
2017-04-13 03:35:44 +08:00
|
|
|
static const struct dax_operations dm_dax_ops;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-04-03 02:55:37 +08:00
|
|
|
static void dm_wq_work(struct work_struct *work);
|
|
|
|
|
2015-04-28 23:50:29 +08:00
|
|
|
/*
 * Tear down everything attached to a mapped_device.  Every resource is
 * NULL-checked (or tolerates a zeroed state) before release, so this is
 * safe to call on a partially initialised md, e.g. from an allocation
 * error path.
 */
static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	/* NOTE(review): relies on bioset_exit() tolerating a never-
	 * initialised (zeroed) bio_set — md comes from kvzalloc_node(). */
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);

	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	if (md->disk) {
		/*
		 * Clear ->private_data under _minor_lock so a concurrent
		 * opener looking up the gendisk cannot grab a stale md.
		 */
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	/* Queue teardown must follow gendisk removal. */
	if (md->queue)
		blk_cleanup_queue(md->queue);

	cleanup_srcu_struct(&md->io_barrier);

	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);
	mutex_destroy(&md->swap_bios_lock);

	/* Request-based (blk-mq) resources go last. */
	dm_mq_cleanup_mapped_device(md);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Allocate and initialise a blank device with a given minor.
|
|
|
|
*/
|
2006-06-26 15:27:32 +08:00
|
|
|
/*
 * Allocate and initialise a blank mapped_device with the given minor
 * (or any free minor if @minor == DM_ANY_MINOR).  Returns NULL on
 * failure; the goto ladder unwinds exactly what has been set up so
 * far, relying on cleanup_mapped_device() for the later stages.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	/* Zero-allocated: cleanup_mapped_device() depends on this. */
	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	/* The initial holder reference is dropped by dm_put(). */
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
	if (!md->queue)
		goto bad;
	md->queue->queuedata = md;
	/*
	 * default to bio-based required ->make_request_fn until DM
	 * table is loaded and md->type established. If request-based
	 * table is loaded: blk-mq will override accordingly.
	 */
	blk_queue_make_request(md->queue, dm_make_request);

	md->disk = alloc_disk_node(1, md->numa_node_id);
	if (!md->disk)
		goto bad;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	/* Bound the number of in-flight swap bios via a counting semaphore. */
	md->swap_bios = get_swap_bios();
	sema_init(&md->swap_bios_semaphore, md->swap_bios);
	mutex_init(&md->swap_bios_lock);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
		md->dax_dev = alloc_dax(md, md->disk->disk_name,
					&dm_dax_ops, 0);
		if (!md->dax_dev)
			goto bad;
	}

	add_disk_no_queue_reg(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	r = dm_stats_init(&md->stats);
	if (r < 0)
		goto bad;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	/* The minor slot must still hold the MINOR_ALLOCED placeholder. */
	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}
|
|
|
|
|
2007-10-20 05:38:43 +08:00
|
|
|
static void unlock_fs(struct mapped_device *md);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Final destruction of a mapped_device: undo everything alloc_dev()
 * did and drop the module reference.
 */
static void free_dev(struct mapped_device *md)
{
	/* Capture the minor now: cleanup_mapped_device() releases md->disk. */
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}
|
|
|
|
|
2018-06-08 04:42:06 +08:00
|
|
|
/*
 * Transfer ownership of the mempools/biosets carried by table @t into
 * the mapped_device.  Always consumes (frees) the table's mempools,
 * whether or not the transfer succeeds.  Returns 0 on success or a
 * negative errno from bioset initialization.
 */
static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	/* At this point the table must supply pools and md's must be empty. */
	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		/* Don't leave md half-bound if the second init fails. */
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Bind a table to the device.
|
|
|
|
*/
|
|
|
|
static void event_callback(void *context)
|
|
|
|
{
|
2007-10-20 05:48:01 +08:00
|
|
|
unsigned long flags;
|
|
|
|
LIST_HEAD(uevents);
|
2005-04-17 06:20:36 +08:00
|
|
|
struct mapped_device *md = (struct mapped_device *) context;
|
|
|
|
|
2007-10-20 05:48:01 +08:00
|
|
|
spin_lock_irqsave(&md->uevent_lock, flags);
|
|
|
|
list_splice_init(&md->uevent_list, &uevents);
|
|
|
|
spin_unlock_irqrestore(&md->uevent_lock, flags);
|
|
|
|
|
2008-08-25 18:56:05 +08:00
|
|
|
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
|
2007-10-20 05:48:01 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
atomic_inc(&md->event_nr);
|
|
|
|
wake_up(&md->eventq);
|
2017-09-20 19:29:49 +08:00
|
|
|
dm_issue_global_event();
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2011-01-14 03:53:46 +08:00
|
|
|
/*
|
|
|
|
* Protected by md->suspend_lock obtained by dm_swap_table().
|
|
|
|
*/
|
2005-07-29 12:15:59 +08:00
|
|
|
/*
 * Set the device capacity to @size (in 512-byte sectors) and keep the
 * backing bdev inode size consistent with it.
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	lockdep_assert_held(&md->suspend_lock);

	set_capacity(md->disk, size);

	/* Mirror the new capacity into the bdev inode, in bytes. */
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
|
|
|
|
|
2009-12-11 07:52:24 +08:00
|
|
|
/*
|
|
|
|
* Returns old map, which caller must destroy.
|
|
|
|
*/
|
|
|
|
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
|
|
|
|
struct queue_limits *limits)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-12-11 07:52:24 +08:00
|
|
|
struct dm_table *old_map;
|
2007-07-24 15:28:11 +08:00
|
|
|
struct request_queue *q = md->queue;
|
2017-12-10 04:16:42 +08:00
|
|
|
bool request_based = dm_table_request_based(t);
|
2005-04-17 06:20:36 +08:00
|
|
|
sector_t size;
|
2018-06-08 04:42:06 +08:00
|
|
|
int ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-09-01 06:17:04 +08:00
|
|
|
lockdep_assert_held(&md->suspend_lock);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
size = dm_table_get_size(t);
|
2006-03-27 17:17:54 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wipe any geometry if the size of the table changed.
|
|
|
|
*/
|
2013-08-16 22:54:23 +08:00
|
|
|
if (size != dm_get_size(md))
|
2006-03-27 17:17:54 +08:00
|
|
|
memset(&md->geometry, 0, sizeof(md->geometry));
|
|
|
|
|
2009-06-22 17:12:17 +08:00
|
|
|
__set_size(md, size);
|
dm table: rework reference counting
Rework table reference counting.
The existing code uses a reference counter. When the last reference is
dropped and the counter reaches zero, the table destructor is called.
Table reference counters are acquired/released from upcalls from other
kernel code (dm_any_congested, dm_merge_bvec, dm_unplug_all).
If the reference counter reaches zero in one of the upcalls, the table
destructor is called from almost random kernel code.
This leads to various problems:
* dm_any_congested being called under a spinlock, which calls the
destructor, which calls some sleeping function.
* the destructor attempting to take a lock that is already taken by the
same process.
* stale reference from some other kernel code keeps the table
constructed, which keeps some devices open, even after successful
return from "dmsetup remove". This can confuse lvm and prevent closing
of underlying devices or reusing device minor numbers.
The patch changes reference counting so that the table destructor can be
called only at predetermined places.
The table has always exactly one reference from either mapped_device->map
or hash_cell->new_map. After this patch, this reference is not counted
in table->holders. A pair of dm_create_table/dm_destroy_table functions
is used for table creation/destruction.
Temporary references from the other code increase table->holders. A pair
of dm_table_get/dm_table_put functions is used to manipulate it.
When the table is about to be destroyed, we wait for table->holders to
reach 0. Then, we call the table destructor. We use active waiting with
msleep(1), because the situation happens rarely (to one user in 5 years)
and removing the device isn't performance-critical task: the user doesn't
care if it takes one tick more or not.
This way, the destructor is called only at specific points
(dm_table_destroy function) and the above problems associated with lazy
destruction can't happen.
Finally remove the temporary protection added to dm_any_congested().
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-01-06 11:05:10 +08:00
|
|
|
|
2005-07-29 12:16:00 +08:00
|
|
|
dm_table_event_callback(t, event_callback, md);
|
|
|
|
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
/*
|
|
|
|
* The queue hasn't been stopped yet, if the old table type wasn't
|
|
|
|
* for request-based during suspension. So stop it to prevent
|
|
|
|
* I/O mapping before resume.
|
|
|
|
* This must be done before setting the queue restrictions,
|
|
|
|
* because request-based dm may be run just after the setting.
|
|
|
|
*/
|
2017-12-10 04:16:42 +08:00
|
|
|
if (request_based)
|
2016-02-21 02:45:38 +08:00
|
|
|
dm_stop_queue(q);
|
2017-12-10 04:16:42 +08:00
|
|
|
|
2024-06-12 13:13:20 +08:00
|
|
|
if (request_based) {
|
2016-02-01 06:22:27 +08:00
|
|
|
/*
|
2024-06-12 13:13:20 +08:00
|
|
|
* Leverage the fact that request-based DM targets are
|
|
|
|
* immutable singletons - used to optimize dm_mq_queue_rq.
|
2016-02-01 06:22:27 +08:00
|
|
|
*/
|
|
|
|
md->immutable_target = dm_table_get_immutable_target(t);
|
|
|
|
}
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
|
2018-06-08 04:42:06 +08:00
|
|
|
ret = __bind_mempools(md, t);
|
|
|
|
if (ret) {
|
|
|
|
old_map = ERR_PTR(ret);
|
|
|
|
goto out;
|
|
|
|
}
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
|
2014-11-24 01:34:29 +08:00
|
|
|
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
|
2016-02-23 03:14:24 +08:00
|
|
|
rcu_assign_pointer(md->map, (void *)t);
|
2011-11-01 04:19:04 +08:00
|
|
|
md->immutable_target_type = dm_table_get_immutable_target_type(t);
|
|
|
|
|
2009-06-22 17:12:34 +08:00
|
|
|
dm_table_set_restrictions(t, q, limits);
|
2014-11-05 21:35:50 +08:00
|
|
|
if (old_map)
|
|
|
|
dm_sync_table(md);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-06-08 04:42:06 +08:00
|
|
|
out:
|
2009-12-11 07:52:24 +08:00
|
|
|
return old_map;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2009-12-11 07:52:23 +08:00
|
|
|
/*
|
|
|
|
* Returns unbound table for the caller to free.
|
|
|
|
*/
|
|
|
|
static struct dm_table *__unbind(struct mapped_device *md)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2014-11-24 01:34:29 +08:00
|
|
|
struct dm_table *map = rcu_dereference_protected(md->map, 1);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (!map)
|
2009-12-11 07:52:23 +08:00
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
dm_table_event_callback(map, NULL, NULL);
|
2014-03-24 02:28:27 +08:00
|
|
|
RCU_INIT_POINTER(md->map, NULL);
|
2013-07-11 06:41:18 +08:00
|
|
|
dm_sync_table(md);
|
2009-12-11 07:52:23 +08:00
|
|
|
|
|
|
|
return map;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Constructor for a new device.
|
|
|
|
*/
|
2006-06-26 15:27:32 +08:00
|
|
|
int dm_create(int minor, struct mapped_device **result)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-01-12 22:32:21 +08:00
|
|
|
int r;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct mapped_device *md;
|
|
|
|
|
2006-06-26 15:27:32 +08:00
|
|
|
md = alloc_dev(minor);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!md)
|
|
|
|
return -ENXIO;
|
|
|
|
|
2018-01-12 22:32:21 +08:00
|
|
|
r = dm_sysfs_init(md);
|
|
|
|
if (r) {
|
|
|
|
free_dev(md);
|
|
|
|
return r;
|
|
|
|
}
|
2009-01-06 11:05:12 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
*result = md;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-08-12 11:14:01 +08:00
|
|
|
/*
|
|
|
|
* Functions to manage md->type.
|
|
|
|
* All are required to hold md->type_lock.
|
|
|
|
*/
|
|
|
|
/* Take the lock that serializes access to md->type. */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}
|
|
|
|
|
|
|
|
/* Release the lock taken by dm_lock_md_type(). */
void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}
|
|
|
|
|
2017-04-28 01:11:23 +08:00
|
|
|
/* Set the device type; the caller must hold md->type_lock. */
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}
|
|
|
|
|
2017-04-28 01:11:23 +08:00
|
|
|
/* Read the device type; callers are expected to hold md->type_lock. */
enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}
|
|
|
|
|
2011-11-01 04:19:04 +08:00
|
|
|
/* Return the immutable target type recorded at table-bind time. */
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}
|
|
|
|
|
dm mpath: disable WRITE SAME if it fails
Workaround the SCSI layer's problematic WRITE SAME heuristics by
disabling WRITE SAME in the DM multipath device's queue_limits if an
underlying device disabled it.
The WRITE SAME heuristics, with both the original commit 5db44863b6eb
("[SCSI] sd: Implement support for WRITE SAME") and the updated commit
66c28f971 ("[SCSI] sd: Update WRITE SAME heuristics"), default to enabling
WRITE SAME(10) even without successfully determining it is supported.
After the first failed WRITE SAME the SCSI layer will disable WRITE SAME
for the device (by setting sdkp->device->no_write_same which results in
'max_write_same_sectors' in device's queue_limits to be set to 0).
When a device is stacked ontop of such a SCSI device any changes to that
SCSI device's queue_limits do not automatically propagate up the stack.
As such, a DM multipath device will not have its WRITE SAME support
disabled. This causes the block layer to continue to issue WRITE SAME
requests to the mpath device which causes paths to fail and (if mpath IO
isn't configured to queue when no paths are available) it will result in
actual IO errors to the upper layers.
This fix doesn't help configurations that have additional devices
stacked ontop of the mpath device (e.g. LVM created linear DM devices
ontop). A proper fix that restacks all the queue_limits from the bottom
of the device stack up will need to be explored if SCSI will continue to
use this model of optimistically allowing op codes and then disabling
them after they fail for the first time.
Before this patch:
EXT4-fs (dm-6): mounted filesystem with ordered data mode. Opts: (null)
device-mapper: multipath: XXX snitm debugging: got -EREMOTEIO (-121)
device-mapper: multipath: XXX snitm debugging: failing WRITE SAME IO with error=-121
end_request: critical target error, dev dm-6, sector 528
dm-6: WRITE SAME failed. Manually zeroing.
device-mapper: multipath: Failing path 8:112.
end_request: I/O error, dev dm-6, sector 4616
dm-6: WRITE SAME failed. Manually zeroing.
end_request: I/O error, dev dm-6, sector 4616
end_request: I/O error, dev dm-6, sector 5640
end_request: I/O error, dev dm-6, sector 6664
end_request: I/O error, dev dm-6, sector 7688
end_request: I/O error, dev dm-6, sector 524288
Buffer I/O error on device dm-6, logical block 65536
lost page write due to I/O error on dm-6
JBD2: Error -5 detected when updating journal superblock for dm-6-8.
end_request: I/O error, dev dm-6, sector 524296
Aborting journal on device dm-6-8.
end_request: I/O error, dev dm-6, sector 524288
Buffer I/O error on device dm-6, logical block 65536
lost page write due to I/O error on dm-6
JBD2: Error -5 detected when updating journal superblock for dm-6-8.
# cat /sys/block/sdh/queue/write_same_max_bytes
0
# cat /sys/block/dm-6/queue/write_same_max_bytes
33553920
After this patch:
EXT4-fs (dm-6): mounted filesystem with ordered data mode. Opts: (null)
device-mapper: multipath: XXX snitm debugging: got -EREMOTEIO (-121)
device-mapper: multipath: XXX snitm debugging: WRITE SAME I/O failed with error=-121
end_request: critical target error, dev dm-6, sector 528
dm-6: WRITE SAME failed. Manually zeroing.
# cat /sys/block/sdh/queue/write_same_max_bytes
0
# cat /sys/block/dm-6/queue/write_same_max_bytes
0
It should be noted that WRITE SAME support wasn't enabled in DM
multipath until v3.10.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: stable@vger.kernel.org # 3.10+
2013-09-20 00:13:58 +08:00
|
|
|
/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	/* Catch callers that no longer hold a reference on md. */
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
|
|
|
|
|
2024-06-11 20:08:33 +08:00
|
|
|
static void dm_init_congested_fn(struct mapped_device *md)
|
|
|
|
{
|
|
|
|
md->queue->backing_dev_info->congested_data = md;
|
|
|
|
md->queue->backing_dev_info->congested_fn = dm_any_congested;
|
|
|
|
}
|
|
|
|
|
2010-08-12 11:14:02 +08:00
|
|
|
/*
|
|
|
|
* Setup the DM device's queue based on md's type
|
|
|
|
*/
|
2016-02-01 01:05:42 +08:00
|
|
|
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
|
2010-08-12 11:14:02 +08:00
|
|
|
{
|
2015-03-08 13:51:47 +08:00
|
|
|
int r;
|
2018-01-09 09:03:04 +08:00
|
|
|
struct queue_limits limits;
|
2017-04-28 01:11:23 +08:00
|
|
|
enum dm_queue_mode type = dm_get_md_type(md);
|
2015-03-08 13:51:47 +08:00
|
|
|
|
2016-06-23 07:54:53 +08:00
|
|
|
switch (type) {
|
2015-03-08 13:51:47 +08:00
|
|
|
case DM_TYPE_REQUEST_BASED:
|
2016-05-25 09:16:51 +08:00
|
|
|
r = dm_mq_init_request_queue(md, t);
|
2015-03-08 13:51:47 +08:00
|
|
|
if (r) {
|
2016-02-21 02:45:38 +08:00
|
|
|
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
|
2015-03-08 13:51:47 +08:00
|
|
|
return r;
|
|
|
|
}
|
2024-06-11 20:08:33 +08:00
|
|
|
dm_init_congested_fn(md);
|
2015-03-08 13:51:47 +08:00
|
|
|
break;
|
|
|
|
case DM_TYPE_BIO_BASED:
|
2016-06-23 07:54:53 +08:00
|
|
|
case DM_TYPE_DAX_BIO_BASED:
|
2024-06-11 20:08:33 +08:00
|
|
|
dm_init_congested_fn(md);
|
2015-03-08 13:51:47 +08:00
|
|
|
break;
|
2017-04-28 01:11:23 +08:00
|
|
|
case DM_TYPE_NONE:
|
|
|
|
WARN_ON_ONCE(true);
|
|
|
|
break;
|
2010-08-12 11:14:02 +08:00
|
|
|
}
|
|
|
|
|
2018-01-09 09:03:04 +08:00
|
|
|
r = dm_calculate_queue_limits(t, &limits);
|
|
|
|
if (r) {
|
|
|
|
DMERR("Cannot calculate initial queue limits");
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
dm_table_set_restrictions(t, md->queue, &limits);
|
|
|
|
blk_register_queue(md->disk);
|
|
|
|
|
2010-08-12 11:14:02 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
dm: fix a race condition in dm_get_md
The function dm_get_md finds a device mapper device with a given dev_t,
increases the reference count and returns the pointer.
dm_get_md calls dm_find_md, dm_find_md takes _minor_lock, finds the
device, tests that the device doesn't have DMF_DELETING or DMF_FREEING
flag, drops _minor_lock and returns pointer to the device. dm_get_md then
calls dm_get. dm_get calls BUG if the device has the DMF_FREEING flag,
otherwise it increments the reference count.
There is a possible race condition - after dm_find_md exits and before
dm_get is called, there are no locks held, so the device may disappear or
DMF_FREEING flag may be set, which results in BUG.
To fix this bug, we need to call dm_get while we hold _minor_lock. This
patch renames dm_find_md to dm_get_md and changes it so that it calls
dm_get while holding the lock.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
2015-02-18 03:30:53 +08:00
|
|
|
/*
 * Look up the mapped_device for @dev and take a reference on it.
 *
 * Returns NULL when no live device owns that minor: the major/minor is
 * out of range, the idr slot is empty or still holds the allocation
 * placeholder, the minor does not match the device's disk, or the
 * device is being freed/deleted.  dm_get() is called while _minor_lock
 * is held, so the device cannot transition to DMF_FREEING between the
 * lookup and the reference acquisition.
 */
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && md != MINOR_ALLOCED &&
	    (MINOR(disk_devt(dm_disk(md))) == minor) &&
	    !test_bit(DMF_FREEING, &md->flags) && !dm_deleting_md(md))
		dm_get(md);
	else
		md = NULL;

	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
|
2006-01-06 16:20:01 +08:00
|
|
|
|
2006-03-27 17:17:53 +08:00
|
|
|
void *dm_get_mdptr(struct mapped_device *md)
|
2006-01-06 16:20:00 +08:00
|
|
|
{
|
2006-03-27 17:17:53 +08:00
|
|
|
return md->interface_ptr;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Stash an opaque interface pointer, retrievable via dm_get_mdptr(). */
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}
|
|
|
|
|
|
|
|
/*
 * Take a reference on the mapped_device.  Must not be called once the
 * device has entered the DMF_FREEING state; dm_hold() is the variant
 * that fails gracefully in that case instead of crashing.
 */
void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
|
|
|
|
|
dm snapshot: suspend merging snapshot when doing exception handover
The "dm snapshot: suspend origin when doing exception handover" commit
fixed a exception store handover bug associated with pending exceptions
to the "snapshot-origin" target.
However, a similar problem exists in snapshot merging. When snapshot
merging is in progress, we use the target "snapshot-merge" instead of
"snapshot-origin". Consequently, during exception store handover, we
must find the snapshot-merge target and suspend its associated
mapped_device.
To avoid lockdep warnings, the target must be suspended and resumed
without holding _origins_lock.
Introduce a dm_hold() function that grabs a reference on a
mapped_device, but unlike dm_get(), it doesn't crash if the device has
the DMF_FREEING flag set, it returns an error in this case.
In snapshot_resume() we grab the reference to the origin device using
dm_hold() while holding _origins_lock (_origins_lock guarantees that the
device won't disappear). Then we release _origins_lock, suspend the
device and grab _origins_lock again.
NOTE to stable@ people:
When backporting to kernels 3.18 and older, use dm_internal_suspend and
dm_internal_resume instead of dm_internal_suspend_fast and
dm_internal_resume_fast.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
2015-02-27 00:41:28 +08:00
|
|
|
int dm_hold(struct mapped_device *md)
|
|
|
|
{
|
|
|
|
spin_lock(&_minor_lock);
|
|
|
|
if (test_bit(DMF_FREEING, &md->flags)) {
|
|
|
|
spin_unlock(&_minor_lock);
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
dm_get(md);
|
|
|
|
spin_unlock(&_minor_lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dm_hold);
|
|
|
|
|
2006-06-26 15:27:35 +08:00
|
|
|
/* Return the mapped device's name. */
const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
|
|
|
|
|
dm: separate device deletion from dm_put
This patch separates the device deletion code from dm_put()
to make sure the deletion happens in the process context.
By this patch, device deletion always occurs in an ioctl (process)
context and dm_put() can be called in interrupt context.
As a result, the request-based dm's bad dm_put() usage pointed out
by Mikulas below disappears.
http://marc.info/?l=dm-devel&m=126699981019735&w=2
Without this patch, I confirmed there is a case to crash the system:
dm_put() => dm_table_destroy() => vfree() => BUG_ON(in_interrupt())
Some more backgrounds and details:
In request-based dm, a device opener can remove a mapped_device
while the last request is still completing, because bios in the last
request complete first and then the device opener can close and remove
the mapped_device before the last request completes:
CPU0 CPU1
=================================================================
<<INTERRUPT>>
blk_end_request_all(clone_rq)
blk_update_request(clone_rq)
bio_endio(clone_bio) == end_clone_bio
blk_update_request(orig_rq)
bio_endio(orig_bio)
<<I/O completed>>
dm_blk_close()
dev_remove()
dm_put(md)
<<Free md>>
blk_finish_request(clone_rq)
....
dm_end_request(clone_rq)
free_rq_clone(clone_rq)
blk_end_request_all(orig_rq)
rq_completed(md)
So request-based dm used dm_get()/dm_put() to hold md for each I/O
until its request completion handling is fully done.
However, the final dm_put() can call the device deletion code which
must not be run in interrupt context and may cause kernel panic.
To solve the problem, this patch moves the device deletion code,
dm_destroy(), to predetermined places that is actually deleting
the mapped_device in ioctl (process) context, and changes dm_put()
just to decrement the reference count of the mapped_device.
By this change, dm_put() can be used in any context and the symmetric
model below is introduced:
dm_create(): create a mapped_device
dm_destroy(): destroy a mapped_device
dm_get(): increment the reference count of a mapped_device
dm_put(): decrement the reference count of a mapped_device
dm_destroy() waits for all references of the mapped_device to disappear,
then deletes the mapped_device.
dm_destroy() uses active waiting with msleep(1), since deleting
the mapped_device isn't performance-critical task.
And since at this point, nobody opens the mapped_device and no new
reference will be taken, the pending counts are just for racing
completing activity and will eventually decrease to zero.
For the unlikely case of the forced module unload, dm_destroy_immediate(),
which doesn't wait and forcibly deletes the mapped_device, is also
introduced and used in dm_hash_remove_all(). Otherwise, "rmmod -f"
may be stuck and never return.
And now, because the mapped_device is deleted at this point, subsequent
accesses to the mapped_device may cause NULL pointer references.
Cc: stable@kernel.org
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2010-08-12 11:13:56 +08:00
|
|
|
/*
 * Tear down a mapped_device: mark it DMF_FREEING so no new references
 * can be handed out, mark the queue dying, suspend the live table,
 * wait for (or warn about) remaining holders, then unbind and free.
 *
 * @wait: if true, block until every holder has dropped its reference;
 *        if false (forced removal, e.g. "rmmod -f"), warn about
 *        remaining users and free anyway.
 */
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	/*
	 * Swap the idr entry for the allocation sentinel and set
	 * DMF_FREEING under _minor_lock, so dm_get_md()/dm_hold() can
	 * no longer hand out new references.
	 */
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(md->queue);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		set_bit(DMF_POST_SUSPENDING, &md->flags);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}
|
|
|
|
|
|
|
|
/*
 * Destroy a mapped_device, waiting (via __dm_destroy(..., wait=true))
 * until every holder has dropped its reference before freeing it.
 */
void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}
|
|
|
|
|
|
|
|
/*
 * Forcibly destroy a mapped_device without waiting for holders to
 * drop their references (__dm_destroy(..., wait=false)).  Used on
 * forced module unload paths where waiting could hang forever.
 */
void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}
|
|
|
|
|
|
|
|
/*
 * Drop a reference to a mapped_device.  Only decrements md->holders;
 * the actual teardown is done by dm_destroy()/__dm_destroy(), which
 * waits for this count to reach zero.  Safe to call from any context.
 */
void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-09-01 06:16:02 +08:00
|
|
|
/*
 * Sleep on md->wait until no I/O remains in flight on @md.
 *
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE; when
 *	interruptible, a pending signal aborts the wait.
 *
 * Returns 0 once the device is quiescent, or -EINTR if the wait was
 * interrupted by a signal.
 */
static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		/* All outstanding I/O has completed: we are done. */
		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	/*
	 * Read barrier after the in-flight count was observed as zero.
	 * NOTE(review): presumably pairs with the completion path that
	 * decrements the in-flight counters before waking md->wait —
	 * confirm against md_in_flight()'s writers.
	 */
	smp_rmb();

	return r;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Process the deferred bios
 *
 * Workqueue handler (md->work): drain md->deferred, re-submitting each
 * bio that was queued while the device was suspended.  Stops early if
 * DMF_BLOCK_IO_FOR_SUSPEND is (re)set, leaving remaining bios deferred.
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	/* Pin the live table for the duration of the drain. */
	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		/* deferred_lock guards md->deferred against concurrent queuers */
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		/*
		 * Request-based dm resubmits through the block layer;
		 * bio-based dm maps the bio against the live table here.
		 */
		if (dm_request_based(md))
			(void) generic_make_request(c);
		else
			(void) dm_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}
|
|
|
|
|
2009-04-03 02:55:36 +08:00
|
|
|
/*
 * Re-enable I/O processing and kick the deferred-bio worker.
 * The memory barrier after clearing DMF_BLOCK_IO_FOR_SUSPEND ensures the
 * flag clear is visible before dm_wq_work() (queued below) tests it.
 */
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 *
 * The device must already be suspended; otherwise ERR_PTR(-EINVAL) is
 * returned.  On queue-limit calculation failure an ERR_PTR of that error
 * is returned instead of the old table.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	/* No live table to copy limits from: compute them from @table. */
	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	/* __bind() returns the previous table, which the caller destroys. */
	map = __bind(md, table, &limits);
	dm_issue_global_event();

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
|
|
|
|
|
|
|
|
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
/*
 * Freeze any filesystem mounted on md->bdev, flushing its dirty data.
 * Stores the frozen superblock in md->frozen_sb and sets DMF_FROZEN so
 * unlock_fs() knows a thaw is required.  Returns 0 on success or the
 * error from freeze_bdev().
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	/* Must not already hold a frozen superblock. */
	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}
|
|
|
|
|
2005-07-29 12:16:00 +08:00
|
|
|
/*
 * Undo lock_fs(): thaw the filesystem frozen on md->bdev, if any.
 * A no-op unless DMF_FROZEN is set, so it is safe to call on paths
 * where lock_fs() was skipped (e.g. noflush suspend) or failed.
 */
static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
|
|
|
|
|
|
|
|
/*
|
2016-09-01 06:16:02 +08:00
|
|
|
* @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
|
|
|
|
* @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
|
|
|
|
* @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
|
|
|
|
*
|
2014-10-29 06:34:52 +08:00
|
|
|
* If __dm_suspend returns 0, the device is completely quiescent
|
|
|
|
* now. There is no request-processing activity. All new requests
|
|
|
|
* are being added to md->deferred list.
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
prep_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
*/
|
2014-10-29 06:34:52 +08:00
|
|
|
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
|
2016-09-01 06:16:02 +08:00
|
|
|
unsigned suspend_flags, long task_state,
|
2016-08-03 01:07:20 +08:00
|
|
|
int dmf_suspended_flag)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2014-10-29 06:34:52 +08:00
|
|
|
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
|
|
|
|
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
|
|
|
|
int r;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-09-01 06:17:04 +08:00
|
|
|
lockdep_assert_held(&md->suspend_lock);
|
|
|
|
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supercedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
/*
|
|
|
|
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
|
|
|
|
* This flag is cleared before dm_suspend returns.
|
|
|
|
*/
|
|
|
|
if (noflush)
|
|
|
|
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
|
2017-04-28 01:11:26 +08:00
|
|
|
else
|
|
|
|
pr_debug("%s: suspending with flush\n", dm_device_name(md));
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supercedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
|
2014-10-29 08:13:31 +08:00
|
|
|
/*
|
|
|
|
* This gets reverted if there's an error later and the targets
|
|
|
|
* provide the .presuspend_undo hook.
|
|
|
|
*/
|
2005-07-29 12:15:57 +08:00
|
|
|
dm_table_presuspend_targets(map);
|
|
|
|
|
2009-06-22 17:12:17 +08:00
|
|
|
/*
|
2009-12-11 07:52:16 +08:00
|
|
|
* Flush I/O to the device.
|
|
|
|
* Any I/O submitted after lock_fs() may not be flushed.
|
|
|
|
* noflush takes precedence over do_lockfs.
|
|
|
|
* (lock_fs() flushes I/Os and waits for them to complete.)
|
2009-06-22 17:12:17 +08:00
|
|
|
*/
|
|
|
|
if (!noflush && do_lockfs) {
|
|
|
|
r = lock_fs(md);
|
2014-10-29 08:13:31 +08:00
|
|
|
if (r) {
|
|
|
|
dm_table_presuspend_undo_targets(map);
|
2014-10-29 06:34:52 +08:00
|
|
|
return r;
|
2014-10-29 08:13:31 +08:00
|
|
|
}
|
2006-01-06 16:20:06 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2009-04-09 07:27:15 +08:00
|
|
|
* Here we must make sure that no processes are submitting requests
|
|
|
|
* to target drivers i.e. no one may be executing
|
|
|
|
* __split_and_process_bio. This is called from dm_request and
|
|
|
|
* dm_wq_work.
|
|
|
|
*
|
|
|
|
* To get all processes out of __split_and_process_bio in dm_request,
|
|
|
|
* we take the write lock. To prevent any process from reentering
|
2010-09-09 00:07:00 +08:00
|
|
|
* __split_and_process_bio from dm_request and quiesce the thread
|
|
|
|
 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
|
|
|
|
* flush_workqueue(md->wq).
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2009-04-09 07:27:14 +08:00
|
|
|
set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
|
2014-11-05 21:35:50 +08:00
|
|
|
if (map)
|
|
|
|
synchronize_srcu(&md->io_barrier);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
dm: add request based barrier support
This patch adds barrier support for request-based dm.
CORE DESIGN
The design is basically same as bio-based dm, which emulates barrier
by mapping empty barrier bios before/after a barrier I/O.
But request-based dm has been using struct request_queue for I/O
queueing, so the block-layer's barrier mechanism can be used.
o Summary of the block-layer's behavior (which is depended by dm-core)
Request-based dm uses QUEUE_ORDERED_DRAIN_FLUSH ordered mode for
I/O barrier. It means that when an I/O requiring barrier is found
in the request_queue, the block-layer makes pre-flush request and
post-flush request just before and just after the I/O respectively.
After the ordered sequence starts, the block-layer waits for all
in-flight I/Os to complete, then gives drivers the pre-flush request,
the barrier I/O and the post-flush request one by one.
It means that the request_queue is stopped automatically by
the block-layer until drivers complete each sequence.
o dm-core
For the barrier I/O, treats it as a normal I/O, so no additional
code is needed.
For the pre/post-flush request, flushes caches by the followings:
1. Make the number of empty barrier requests required by target's
num_flush_requests, and map them (dm_rq_barrier()).
2. Waits for the mapped barriers to complete (dm_rq_barrier()).
If error has occurred, save the error value to md->barrier_error
(dm_end_request()).
(*) Basically, the first reported error is taken.
But -EOPNOTSUPP supersedes any error and DM_ENDIO_REQUEUE
follows.
3. Requeue the pre/post-flush request if the error value is
DM_ENDIO_REQUEUE. Otherwise, completes with the error value
(dm_rq_barrier_work()).
The pre/post-flush work above is done in the kernel thread (kdmflush)
context, since memory allocation which might sleep is needed in
dm_rq_barrier() but sleep is not allowed in dm_request_fn(), which is
an irq-disabled context.
Also, clones of the pre/post-flush request share an original, so
such clones can't be completed using the softirq context.
Instead, complete them in the context of underlying device drivers.
It should be safe since there is no I/O dispatching during
the completion of such clones.
For suspend, the workqueue of kdmflush needs to be flushed after
the request_queue has been stopped. Otherwise, the next flush work
can be kicked even after the suspend completes.
TARGET INTERFACE
No new interface is added.
Just use the existing num_flush_requests in struct target_type
as same as bio-based dm.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-12-11 07:52:18 +08:00
|
|
|
/*
|
2010-09-09 00:07:00 +08:00
|
|
|
* Stop md->queue before flushing md->wq in case request-based
|
|
|
|
* dm defers requests to md->wq from md->queue.
|
dm: add request based barrier support
This patch adds barrier support for request-based dm.
CORE DESIGN
The design is basically same as bio-based dm, which emulates barrier
by mapping empty barrier bios before/after a barrier I/O.
But request-based dm has been using struct request_queue for I/O
queueing, so the block-layer's barrier mechanism can be used.
o Summary of the block-layer's behavior (which is depended by dm-core)
Request-based dm uses QUEUE_ORDERED_DRAIN_FLUSH ordered mode for
I/O barrier. It means that when an I/O requiring barrier is found
in the request_queue, the block-layer makes pre-flush request and
post-flush request just before and just after the I/O respectively.
After the ordered sequence starts, the block-layer waits for all
in-flight I/Os to complete, then gives drivers the pre-flush request,
the barrier I/O and the post-flush request one by one.
It means that the request_queue is stopped automatically by
the block-layer until drivers complete each sequence.
o dm-core
For the barrier I/O, treats it as a normal I/O, so no additional
code is needed.
For the pre/post-flush request, flushes caches by the followings:
1. Make the number of empty barrier requests required by target's
num_flush_requests, and map them (dm_rq_barrier()).
2. Waits for the mapped barriers to complete (dm_rq_barrier()).
If error has occurred, save the error value to md->barrier_error
(dm_end_request()).
(*) Basically, the first reported error is taken.
But -EOPNOTSUPP supersedes any error and DM_ENDIO_REQUEUE
follows.
3. Requeue the pre/post-flush request if the error value is
DM_ENDIO_REQUEUE. Otherwise, completes with the error value
(dm_rq_barrier_work()).
The pre/post-flush work above is done in the kernel thread (kdmflush)
context, since memory allocation which might sleep is needed in
dm_rq_barrier() but sleep is not allowed in dm_request_fn(), which is
an irq-disabled context.
Also, clones of the pre/post-flush request share an original, so
such clones can't be completed using the softirq context.
Instead, complete them in the context of underlying device drivers.
It should be safe since there is no I/O dispatching during
the completion of such clones.
For suspend, the workqueue of kdmflush needs to be flushed after
the request_queue has been stopped. Otherwise, the next flush work
can be kicked even after the suspend completes.
TARGET INTERFACE
No new interface is added.
Just use the existing num_flush_requests in struct target_type
as same as bio-based dm.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-12-11 07:52:18 +08:00
|
|
|
*/
|
2018-10-11 10:49:26 +08:00
|
|
|
if (dm_request_based(md))
|
2016-02-21 02:45:38 +08:00
|
|
|
dm_stop_queue(md->queue);
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
pref_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
|
dm: add request based barrier support
This patch adds barrier support for request-based dm.
CORE DESIGN
The design is basically same as bio-based dm, which emulates barrier
by mapping empty barrier bios before/after a barrier I/O.
But request-based dm has been using struct request_queue for I/O
queueing, so the block-layer's barrier mechanism can be used.
o Summary of the block-layer's behavior (which is depended by dm-core)
Request-based dm uses QUEUE_ORDERED_DRAIN_FLUSH ordered mode for
I/O barrier. It means that when an I/O requiring barrier is found
in the request_queue, the block-layer makes pre-flush request and
post-flush request just before and just after the I/O respectively.
After the ordered sequence starts, the block-layer waits for all
in-flight I/Os to complete, then gives drivers the pre-flush request,
the barrier I/O and the post-flush request one by one.
It means that the request_queue is stopped automatically by
the block-layer until drivers complete each sequence.
o dm-core
For the barrier I/O, treats it as a normal I/O, so no additional
code is needed.
For the pre/post-flush request, flushes caches by the followings:
1. Make the number of empty barrier requests required by target's
num_flush_requests, and map them (dm_rq_barrier()).
2. Waits for the mapped barriers to complete (dm_rq_barrier()).
If error has occurred, save the error value to md->barrier_error
(dm_end_request()).
(*) Basically, the first reported error is taken.
But -EOPNOTSUPP supersedes any error and DM_ENDIO_REQUEUE
follows.
3. Requeue the pre/post-flush request if the error value is
DM_ENDIO_REQUEUE. Otherwise, completes with the error value
(dm_rq_barrier_work()).
The pre/post-flush work above is done in the kernel thread (kdmflush)
context, since memory allocation which might sleep is needed in
dm_rq_barrier() but sleep is not allowed in dm_request_fn(), which is
an irq-disabled context.
Also, clones of the pre/post-flush request share an original, so
such clones can't be completed using the softirq context.
Instead, complete them in the context of underlying device drivers.
It should be safe since there is no I/O dispatching during
the completion of such clones.
For suspend, the workqueue of kdmflush needs to be flushed after
the request_queue has been stopped. Otherwise, the next flush work
can be kicked even after the suspend completes.
TARGET INTERFACE
No new interface is added.
Just use the existing num_flush_requests in struct target_type
as same as bio-based dm.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-12-11 07:52:18 +08:00
|
|
|
flush_workqueue(md->wq);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2009-04-09 07:27:15 +08:00
|
|
|
* At this point no more requests are entering target request routines.
|
|
|
|
* We call dm_wait_for_completion to wait for all existing requests
|
|
|
|
* to finish.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2016-09-01 06:16:02 +08:00
|
|
|
r = dm_wait_for_completion(md, task_state);
|
2016-08-03 01:07:20 +08:00
|
|
|
if (!r)
|
|
|
|
set_bit(dmf_suspended_flag, &md->flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-02-08 10:10:22 +08:00
|
|
|
if (noflush)
|
2009-04-03 02:55:39 +08:00
|
|
|
clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
|
2014-11-05 21:35:50 +08:00
|
|
|
if (map)
|
|
|
|
synchronize_srcu(&md->io_barrier);
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supercedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushdback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* were we interrupted ? */
|
2008-02-08 10:10:30 +08:00
|
|
|
if (r < 0) {
|
2009-04-03 02:55:36 +08:00
|
|
|
dm_queue_flush(md);
|
2008-02-08 10:10:25 +08:00
|
|
|
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
pref_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
if (dm_request_based(md))
|
2016-02-21 02:45:38 +08:00
|
|
|
dm_start_queue(md->queue);
|
dm: prepare for request based option
This patch adds core functions for request-based dm.
When struct mapped device (md) is initialized, md->queue has
an I/O scheduler and the following functions are used for
request-based dm as the queue functions:
make_request_fn: dm_make_request()
pref_fn: dm_prep_fn()
request_fn: dm_request_fn()
softirq_done_fn: dm_softirq_done()
lld_busy_fn: dm_lld_busy()
Actual initializations are done in another patch (PATCH 2).
Below is a brief summary of how request-based dm behaves, including:
- making request from bio
- cloning, mapping and dispatching request
- completing request and bio
- suspending md
- resuming md
bio to request
==============
md->queue->make_request_fn() (dm_make_request()) calls __make_request()
for a bio submitted to the md.
Then, the bio is kept in the queue as a new request or merged into
another request in the queue if possible.
Cloning and Mapping
===================
Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()),
when requests are dispatched after they are sorted by the I/O scheduler.
dm_request_fn() checks busy state of underlying devices using
target's busy() function and stops dispatching requests to keep them
on the dm device's queue if busy.
It helps better I/O merging, since no merge is done for a request
once it is dispatched to underlying devices.
Actual cloning and mapping are done in dm_prep_fn() and map_request()
called from dm_request_fn().
dm_prep_fn() clones not only request but also bios of the request
so that dm can hold bio completion in error cases and prevent
the bio submitter from noticing the error.
(See the "Completion" section below for details.)
After the cloning, the clone is mapped by target's map_rq() function
and inserted to underlying device's queue using
blk_insert_cloned_request().
Completion
==========
Request completion can be hooked by rq->end_io(), but then, all bios
in the request will have been completed even error cases, and the bio
submitter will have noticed the error.
To prevent the bio completion in error cases, request-based dm clones
both bio and request and hooks both bio->bi_end_io() and rq->end_io():
bio->bi_end_io(): end_clone_bio()
rq->end_io(): end_clone_request()
Summary of the request completion flow is below:
blk_end_request() for a clone request
=> blk_update_request()
=> bio->bi_end_io() == end_clone_bio() for each clone bio
=> Free the clone bio
=> Success: Complete the original bio (blk_update_request())
Error: Don't complete the original bio
=> blk_finish_request()
=> rq->end_io() == end_clone_request()
=> blk_complete_request()
=> dm_softirq_done()
=> Free the clone request
=> Success: Complete the original request (blk_end_request())
Error: Requeue the original request
end_clone_bio() completes the original request on the size of
the original bio in successful cases.
Even if all bios in the original request are completed by that
completion, the original request must not be completed yet to keep
the ordering of request completion for the stacking.
So end_clone_bio() uses blk_update_request() instead of
blk_end_request().
In error cases, end_clone_bio() doesn't complete the original bio.
It just frees the cloned bio and gives over the error handling to
end_clone_request().
end_clone_request(), which is called with queue lock held, completes
the clone request and the original request in a softirq context
(dm_softirq_done()), which has no queue lock, to avoid a deadlock
issue on submission of another request during the completion:
- The submitted request may be mapped to the same device
- Request submission requires queue lock, but the queue lock
has been held by itself and it doesn't know that
The clone request has no clone bio when dm_softirq_done() is called.
So target drivers can't resubmit it again even error cases.
Instead, they can ask dm core for requeueing and remapping
the original request in that cases.
suspend
=======
Request-based dm uses stopping md->queue as suspend of the md.
For noflush suspend, just stops md->queue.
For flush suspend, inserts a marker request to the tail of md->queue.
And dispatches all requests in md->queue until the marker comes to
the front of md->queue. Then, stops dispatching request and waits
for the all dispatched requests to complete.
After that, completes the marker request, stops md->queue and
wake up the waiter on the suspend queue, md->wait.
resume
======
Starts md->queue.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:35 +08:00
|
|
|
|
2005-07-29 12:16:00 +08:00
|
|
|
unlock_fs(md);
|
2014-10-29 08:13:31 +08:00
|
|
|
dm_table_presuspend_undo_targets(map);
|
2014-10-29 06:34:52 +08:00
|
|
|
/* pushback list is already flushed, so skip flush */
|
2005-07-29 12:16:00 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-10-29 06:34:52 +08:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
/*
 * dm_suspend - userspace-driven suspend of a mapped device.
 * @md: the mapped device to suspend
 * @suspend_flags: DM_SUSPEND_* flags forwarded to __dm_suspend()
 *
 * Returns 0 on success, -EINVAL if @md is already suspended, or the
 * error propagated from wait_on_bit()/__dm_suspend().
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	/*
	 * SINGLE_DEPTH_NESTING: internal suspend paths take the same lock
	 * class, so lockdep must allow one level of nesting here.
	 */
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		/* Already suspended by userspace -- nothing to do. */
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;	/* interrupted while waiting */
		goto retry;		/* re-take the lock and re-check state */
	}

	/* Safe to dereference md->map: suspend_lock is held. */
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	/* Targets observe DMF_POST_SUSPENDING for the duration of postsuspend. */
	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
|
|
|
|
|
2014-10-29 06:34:52 +08:00
|
|
|
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
|
|
|
|
{
|
|
|
|
if (map) {
|
|
|
|
int r = dm_table_resume_targets(map);
|
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
dm_queue_flush(md);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flushing deferred I/Os must be done after targets are resumed
|
|
|
|
* so that mapping of targets can work correctly.
|
|
|
|
* Request-based dm is queueing the deferred I/Os in its request_queue.
|
|
|
|
*/
|
|
|
|
if (dm_request_based(md))
|
2016-02-21 02:45:38 +08:00
|
|
|
dm_start_queue(md->queue);
|
2014-10-29 06:34:52 +08:00
|
|
|
|
|
|
|
unlock_fs(md);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * dm_resume - userspace-driven resume of a suspended mapped device.
 * @md: the mapped device to resume
 *
 * Returns 0 on success, -EINVAL if @md is not suspended or has no
 * usable table, or the error propagated from wait_on_bit()/__dm_resume().
 */
int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	/* Same nesting annotation as dm_suspend(); see comment there. */
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;	/* not suspended -- nothing to resume */

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;	/* interrupted while waiting */
		goto retry;		/* re-take the lock and re-check state */
	}

	/* Safe to dereference md->map: suspend_lock is held. */
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;	/* no (non-empty) table bound -- cannot resume */

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
|
|
|
|
|
2013-08-16 22:54:23 +08:00
|
|
|
/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

/*
 * Caller must hold md->suspend_lock.  Internal suspends nest via
 * md->internal_suspend_count; only the outermost call does real work.
 */
static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	/* Refcount nested internal suspends; only the first does real work. */
	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		/* Already suspended by userspace; just record the internal flag. */
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	/* Safe to dereference md->map: suspend_lock is held. */
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	/* Targets observe DMF_POST_SUSPENDING for the duration of postsuspend. */
	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
|
|
|
|
|
|
|
|
/*
 * Undo __dm_internal_suspend().  Only the outermost resume (when
 * internal_suspend_count drops to zero) actually resumes the device.
 *
 * Caller must hold md->suspend_lock.
 */
static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	/* Order the clear_bit before waking waiters on that flag bit. */
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}
|
|
|
|
|
|
|
|
/*
 * Internal suspend with the noflush option, taking md->suspend_lock
 * for the duration of the suspend.
 */
void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
|
|
|
|
|
|
|
|
/*
 * Resume a device suspended via dm_internal_suspend_noflush(),
 * under md->suspend_lock.
 */
void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fast variants of internal suspend/resume hold md->suspend_lock,
|
|
|
|
* which prevents interaction with userspace-driven suspend.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Fast internal suspend: block new I/O and wait for in-flight I/O to
 * drain, without invoking the target presuspend/postsuspend hooks that
 * __dm_internal_suspend() runs.
 *
 * Returns with md->suspend_lock HELD in all cases (including the early
 * return); the matching dm_internal_resume_fast() releases it.
 */
void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
|
2013-08-16 22:54:23 +08:00
|
|
|
|
2014-10-29 06:34:52 +08:00
|
|
|
/*
 * Undo dm_internal_suspend_fast() and release md->suspend_lock, which
 * the suspend side left held.  Note: does NOT take the lock itself.
 */
void dm_internal_resume_fast(struct mapped_device *md)
{
	/* Nothing to flush if the device was suspended before the fast suspend. */
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
|
2013-08-16 22:54:23 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*-----------------------------------------------------------------
|
|
|
|
* Event notification.
|
|
|
|
*---------------------------------------------------------------*/
|
2010-03-06 10:32:31 +08:00
|
|
|
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
|
2009-06-22 17:12:30 +08:00
|
|
|
unsigned cookie)
|
2007-12-13 22:15:57 +08:00
|
|
|
{
|
2024-06-11 20:26:44 +08:00
|
|
|
int r;
|
|
|
|
unsigned noio_flag;
|
2009-06-22 17:12:30 +08:00
|
|
|
char udev_cookie[DM_COOKIE_LENGTH];
|
|
|
|
char *envp[] = { udev_cookie, NULL };
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
noio_flag = memalloc_noio_save();
|
|
|
|
|
2009-06-22 17:12:30 +08:00
|
|
|
if (!cookie)
|
2024-06-11 20:26:44 +08:00
|
|
|
r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
|
2009-06-22 17:12:30 +08:00
|
|
|
else {
|
|
|
|
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
|
|
|
|
DM_COOKIE_ENV_VAR_NAME, cookie);
|
2024-06-11 20:26:44 +08:00
|
|
|
r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
|
|
|
|
action, envp);
|
2009-06-22 17:12:30 +08:00
|
|
|
}
|
2024-06-11 20:26:44 +08:00
|
|
|
|
|
|
|
memalloc_noio_restore(noio_flag);
|
|
|
|
|
|
|
|
return r;
|
2007-12-13 22:15:57 +08:00
|
|
|
}
|
|
|
|
|
2007-10-20 05:48:01 +08:00
|
|
|
uint32_t dm_next_uevent_seq(struct mapped_device *md)
|
|
|
|
{
|
|
|
|
return atomic_add_return(1, &md->uevent_seq);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Return the current value of md's event counter.
 */
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}
|
|
|
|
|
|
|
|
/*
 * Sleep until md's event counter differs from @event_nr.
 * Returns 0 once the counter has changed, non-zero if the wait was
 * interrupted by a signal.
 */
int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
|
|
|
|
|
2007-10-20 05:48:01 +08:00
|
|
|
/*
 * Queue a uevent entry on md->uevent_list for later emission.
 * uevent_lock is taken irq-safe, so this may be called from
 * interrupt context.
 */
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* The gendisk is only valid as long as you have a reference
|
|
|
|
* count on 'md'.
|
|
|
|
*/
|
|
|
|
/*
 * Return md's gendisk.  The gendisk is only valid as long as the
 * caller holds a reference count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-01-06 11:05:12 +08:00
|
|
|
/*
 * Return the sysfs kobject embedded in md's kobj_holder.
 */
struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}
|
|
|
|
|
|
|
|
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
|
|
|
|
{
|
|
|
|
struct mapped_device *md;
|
|
|
|
|
2014-01-14 08:37:54 +08:00
|
|
|
md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
|
2009-01-06 11:05:12 +08:00
|
|
|
|
2017-11-01 15:42:36 +08:00
|
|
|
spin_lock(&_minor_lock);
|
|
|
|
if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
|
|
|
|
md = NULL;
|
|
|
|
goto out;
|
|
|
|
}
|
2009-01-06 11:05:12 +08:00
|
|
|
dm_get(md);
|
2017-11-01 15:42:36 +08:00
|
|
|
out:
|
|
|
|
spin_unlock(&_minor_lock);
|
|
|
|
|
2009-01-06 11:05:12 +08:00
|
|
|
return md;
|
|
|
|
}
|
|
|
|
|
2009-12-11 07:52:26 +08:00
|
|
|
/*
 * Return non-zero if md is suspended (DMF_SUSPENDED set).
 */
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
/*
 * Return non-zero while md is running its postsuspend hooks
 * (DMF_POST_SUSPENDING set).
 */
static int dm_post_suspending_md(struct mapped_device *md)
{
	return test_bit(DMF_POST_SUSPENDING, &md->flags);
}
|
|
|
|
|
2014-10-29 06:34:52 +08:00
|
|
|
/*
 * Return non-zero if md is internally suspended
 * (DMF_SUSPENDED_INTERNALLY set).
 */
int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}
|
|
|
|
|
2013-11-02 06:27:41 +08:00
|
|
|
/*
 * Return non-zero if deferred removal has been requested for md
 * (DMF_DEFERRED_REMOVE set).
 */
int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}
|
|
|
|
|
2009-12-11 07:52:27 +08:00
|
|
|
/*
 * Return non-zero if the mapped device underlying @ti is suspended.
 * dm_table_get_md() does not take a reference, which allows targets to
 * call this from presuspend/postsuspend context.
 */
int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
/*
 * Return non-zero if the mapped device underlying @ti is currently in
 * its postsuspend phase.
 */
int dm_post_suspending(struct dm_target *ti)
{
	return dm_post_suspending_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
|
|
|
|
|
[PATCH] dm: suspend: add noflush pushback
In device-mapper I/O is sometimes queued within targets for later processing.
For example the multipath target can be configured to store I/O when no paths
are available instead of returning it -EIO.
This patch allows the device-mapper core to instruct a target to transfer the
contents of any such in-target queue back into the core. This frees up the
resources used by the target so the core can replace that target with an
alternative one and then resend the I/O to it. Without this patch the only
way to change the target in such circumstances involves returning the I/O with
an error back to the filesystem/application. In the multipath case, this
patch will let us add new paths for existing I/O to try after all the existing
paths have failed.
DMF_NOFLUSH_SUSPENDING
----------------------
If the DM_NOFLUSH_FLAG ioctl option is specified at suspend time, the
DMF_NOFLUSH_SUSPENDING flag is set in md->flags during dm_suspend(). It
is always cleared before dm_suspend() returns.
The flag must be visible while the target is flushing pending I/Os so it
is set before presuspend where the flush starts and unset after the wait
for md->pending where the flush ends.
Target drivers can check this flag by calling dm_noflush_suspending().
DM_MAPIO_REQUEUE / DM_ENDIO_REQUEUE
-----------------------------------
A target's map() function can now return DM_MAPIO_REQUEUE to request the
device mapper core queue the bio.
Similarly, a target's end_io() function can return DM_ENDIO_REQUEUE to request
the same. This has been labelled 'pushback'.
The __map_bio() and clone_endio() functions in the core treat these return
values as errors and call dec_pending() to end the I/O.
dec_pending
-----------
dec_pending() saves the pushback request in struct dm_io->error. Once all
the split clones have ended, dec_pending() will put the original bio on
the md->pushback list. Note that this supersedes any I/O errors.
It is possible for the suspend with DM_NOFLUSH_FLAG to be aborted while
in progress (e.g. by user interrupt). dec_pending() checks for this and
returns -EIO if it happened.
pushback list and pushback_lock
--------------------------------
The bio is queued on md->pushback temporarily in dec_pending(), and after
all pending I/Os return, md->pushback is merged into md->deferred in
dm_suspend() for re-issuing at resume time.
md->pushback_lock protects md->pushback.
The lock should be held with irq disabled because dec_pending() can be
called from interrupt context.
Queueing bios to md->pushback in dec_pending() must be done atomically
with the check for DMF_NOFLUSH_SUSPENDING flag. So md->pushback_lock is
held when checking the flag. Otherwise dec_pending() may queue a bio to
md->pushback after the interrupted dm_suspend() flushes md->pushback.
Then the bio would be left in md->pushback.
Flag setting in dm_suspend() can be done without md->pushback_lock because
the flag is checked only after presuspend and the set value is already
made visible via the target's presuspend function.
The flag can be checked without md->pushback_lock (e.g. the first part of
the dec_pending() or target drivers), because the flag is checked again
with md->pushback_lock held when the bio is really queued to md->pushback
as described above. So even if the flag is cleared after the lockless
checkings, the bio isn't left in md->pushback but returned to applications
with -EIO.
Other notes on the current patch
--------------------------------
- md->pushback is added to the struct mapped_device instead of using
md->deferred directly because md->io_lock which protects md->deferred is
rw_semaphore and can't be used in interrupt context like dec_pending(),
and md->io_lock protects the DMF_BLOCK_IO flag of md->flags too.
- Don't issue lock_fs() in dm_suspend() if the DM_NOFLUSH_FLAG
ioctl option is specified, because I/Os generated by lock_fs() would be
pushed back and never return if there were no valid devices.
- If an error occurs in dm_suspend() after the DMF_NOFLUSH_SUSPENDING
flag is set, md->pushback must be flushed because I/Os may be queued to
the list already. (flush_and_out label in dm_suspend())
Test results
------------
I have tested using multipath target with the next patch.
The following tests are for regression/compatibility:
- I/Os succeed when valid paths exist;
- I/Os fail when there are no valid paths and queue_if_no_path is not
set;
- I/Os are queued in the multipath target when there are no valid paths and
queue_if_no_path is set;
- The queued I/Os above fail when suspend is issued without the
DM_NOFLUSH_FLAG ioctl option. I/Os spanning 2 multipath targets also
fail.
The following tests are for the normal code path of new pushback feature:
- Queued I/Os in the multipath target are flushed from the target
but don't return when suspend is issued with the DM_NOFLUSH_FLAG
ioctl option;
- The I/Os above are queued in the multipath target again when
resume is issued without path recovery;
- The I/Os above succeed when resume is issued after path recovery
or table load;
- Queued I/Os in the multipath target succeed when resume is issued
with the DM_NOFLUSH_FLAG ioctl option after table load. I/Os
spanning 2 multipath targets also succeed.
The following tests are for the error paths of the new pushback feature:
- When the bdget_disk() fails in dm_suspend(), the
DMF_NOFLUSH_SUSPENDING flag is cleared and I/Os already queued to the
pushback list are flushed properly.
- When suspend with the DM_NOFLUSH_FLAG ioctl option is interrupted,
o I/Os which had already been queued to the pushback list
at the time don't return, and are re-issued at resume time;
o I/Os which hadn't been returned at the time return with EIO.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-08 18:41:09 +08:00
|
|
|
/*
 * Return non-zero if the mapped device underlying @ti is being
 * suspended with the noflush option, so targets can choose to requeue
 * ("push back") queued I/O instead of failing it.
 */
int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
|
|
|
|
|
2017-04-28 01:11:23 +08:00
|
|
|
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
|
2017-12-09 03:40:52 +08:00
|
|
|
unsigned integrity, unsigned per_io_data_size,
|
|
|
|
unsigned min_pool_size)
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
{
|
2016-02-23 01:16:21 +08:00
|
|
|
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
|
2015-06-26 22:01:13 +08:00
|
|
|
unsigned int pool_size = 0;
|
2017-12-12 12:17:47 +08:00
|
|
|
unsigned int front_pad, io_front_pad;
|
2018-05-21 06:25:53 +08:00
|
|
|
int ret;
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
|
|
|
|
if (!pools)
|
2015-06-26 21:42:57 +08:00
|
|
|
return NULL;
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
|
2015-06-26 22:01:13 +08:00
|
|
|
switch (type) {
|
|
|
|
case DM_TYPE_BIO_BASED:
|
2016-06-23 07:54:53 +08:00
|
|
|
case DM_TYPE_DAX_BIO_BASED:
|
2017-12-09 03:40:52 +08:00
|
|
|
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
|
2016-02-01 02:28:26 +08:00
|
|
|
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
|
2017-12-12 12:17:47 +08:00
|
|
|
io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
|
2018-05-21 06:25:53 +08:00
|
|
|
ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
|
|
|
|
if (ret)
|
2017-12-12 12:17:47 +08:00
|
|
|
goto out;
|
2018-05-21 06:25:53 +08:00
|
|
|
if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
|
2017-01-23 01:32:46 +08:00
|
|
|
goto out;
|
2015-06-26 22:01:13 +08:00
|
|
|
break;
|
|
|
|
case DM_TYPE_REQUEST_BASED:
|
2017-12-09 03:40:52 +08:00
|
|
|
pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
|
2015-06-26 22:01:13 +08:00
|
|
|
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
|
2016-02-01 01:05:42 +08:00
|
|
|
/* per_io_data_size is used for blk-mq pdu at queue allocation */
|
2015-06-26 22:01:13 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|
2018-05-21 06:25:53 +08:00
|
|
|
ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
|
|
|
|
if (ret)
|
2013-03-02 06:45:48 +08:00
|
|
|
goto out;
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
|
2018-05-21 06:25:53 +08:00
|
|
|
if (integrity && bioset_integrity_create(&pools->bs, pool_size))
|
2013-03-02 06:45:48 +08:00
|
|
|
goto out;
|
2011-03-17 18:11:05 +08:00
|
|
|
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
return pools;
|
2015-05-22 21:14:04 +08:00
|
|
|
|
|
|
|
out:
|
|
|
|
dm_free_md_mempools(pools);
|
2015-06-26 22:01:13 +08:00
|
|
|
|
2015-06-26 21:42:57 +08:00
|
|
|
return NULL;
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void dm_free_md_mempools(struct dm_md_mempools *pools)
|
|
|
|
{
|
|
|
|
if (!pools)
|
|
|
|
return;
|
|
|
|
|
2018-05-21 06:25:53 +08:00
|
|
|
bioset_exit(&pools->bs);
|
|
|
|
bioset_exit(&pools->io_bs);
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are more fitting to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
type of underlying device
bio-based request-based
----------------------------------------------
bio-based OK OK
request-based -- OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to at the table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take a time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 17:12:36 +08:00
|
|
|
|
|
|
|
kfree(pools);
|
|
|
|
}
|
|
|
|
|
2016-07-08 20:23:51 +08:00
|
|
|
struct dm_pr {
|
|
|
|
u64 old_key;
|
|
|
|
u64 new_key;
|
|
|
|
u32 flags;
|
|
|
|
bool fail_early;
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Run @fn over every underlying device of the live table's single
 * target, forwarding @data to each invocation.
 *
 * Return values:
 *   -ENOTTY  no live table, empty table, or more than one target
 *   -EAGAIN  the mapped device is currently suspended
 *   -EINVAL  the target does not implement iterate_devices
 *   otherwise, whatever the target's iterate_devices reports
 */
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map;
	struct dm_target *ti;
	int srcu_idx;
	int r = -ENOTTY;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;
	ti = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	r = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return r;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For register / unregister we need to manually call out to every path.
|
|
|
|
*/
|
|
|
|
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
|
|
|
|
sector_t start, sector_t len, void *data)
|
|
|
|
{
|
|
|
|
struct dm_pr *pr = data;
|
|
|
|
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
|
|
|
|
|
|
|
|
if (!ops || !ops->pr_register)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
|
|
|
|
u32 flags)
|
|
|
|
{
|
|
|
|
struct dm_pr pr = {
|
|
|
|
.old_key = old_key,
|
|
|
|
.new_key = new_key,
|
|
|
|
.flags = flags,
|
|
|
|
.fail_early = true,
|
|
|
|
};
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = dm_call_pr(bdev, __dm_pr_register, &pr);
|
|
|
|
if (ret && new_key) {
|
|
|
|
/* unregister all paths if we failed to register any path */
|
|
|
|
pr.old_key = new_key;
|
|
|
|
pr.new_key = 0;
|
|
|
|
pr.flags = 0;
|
|
|
|
pr.fail_early = false;
|
|
|
|
dm_call_pr(bdev, __dm_pr_register, &pr);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2015-10-15 20:10:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
|
2016-02-19 05:13:51 +08:00
|
|
|
u32 flags)
|
2015-10-15 20:10:51 +08:00
|
|
|
{
|
|
|
|
struct mapped_device *md = bdev->bd_disk->private_data;
|
|
|
|
const struct pr_ops *ops;
|
2018-04-04 03:05:12 +08:00
|
|
|
int r, srcu_idx;
|
2015-10-15 20:10:51 +08:00
|
|
|
|
2018-04-04 04:54:10 +08:00
|
|
|
r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
|
2015-10-15 20:10:51 +08:00
|
|
|
if (r < 0)
|
2018-04-04 03:05:12 +08:00
|
|
|
goto out;
|
2015-10-15 20:10:51 +08:00
|
|
|
|
|
|
|
ops = bdev->bd_disk->fops->pr_ops;
|
|
|
|
if (ops && ops->pr_reserve)
|
|
|
|
r = ops->pr_reserve(bdev, key, type, flags);
|
|
|
|
else
|
|
|
|
r = -EOPNOTSUPP;
|
2018-04-04 03:05:12 +08:00
|
|
|
out:
|
|
|
|
dm_unprepare_ioctl(md, srcu_idx);
|
2015-10-15 20:10:51 +08:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * pr_ops->pr_release entry point: release the reservation held under @key
 * on the underlying device resolved by dm_prepare_ioctl().
 */
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int srcu_idx;
	int r = dm_prepare_ioctl(md, &srcu_idx, &bdev);

	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	r = (ops && ops->pr_release) ?
		ops->pr_release(bdev, key, type) : -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
|
|
|
|
|
|
|
|
/*
 * pr_ops->pr_preempt entry point: preempt (optionally abort) the holder of
 * @old_key in favour of @new_key on the underlying device resolved by
 * dm_prepare_ioctl().
 */
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int srcu_idx;
	int r = dm_prepare_ioctl(md, &srcu_idx, &bdev);

	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	r = (ops && ops->pr_preempt) ?
		ops->pr_preempt(bdev, old_key, new_key, type, abort) :
		-EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
|
|
|
|
|
|
|
|
/*
 * pr_ops->pr_clear entry point: clear all reservations and registrations
 * under @key on the underlying device resolved by dm_prepare_ioctl().
 */
static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int srcu_idx;
	int r = dm_prepare_ioctl(md, &srcu_idx, &bdev);

	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	r = (ops && ops->pr_clear) ? ops->pr_clear(bdev, key) : -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
|
|
|
|
|
|
|
|
static const struct pr_ops dm_pr_ops = {
|
|
|
|
.pr_register = dm_pr_register,
|
|
|
|
.pr_reserve = dm_pr_reserve,
|
|
|
|
.pr_release = dm_pr_release,
|
|
|
|
.pr_preempt = dm_pr_preempt,
|
|
|
|
.pr_clear = dm_pr_clear,
|
|
|
|
};
|
|
|
|
|
2009-09-22 08:01:13 +08:00
|
|
|
static const struct block_device_operations dm_blk_dops = {
|
2005-04-17 06:20:36 +08:00
|
|
|
.open = dm_blk_open,
|
|
|
|
.release = dm_blk_close,
|
2006-10-03 16:15:15 +08:00
|
|
|
.ioctl = dm_blk_ioctl,
|
2006-03-27 17:17:54 +08:00
|
|
|
.getgeo = dm_blk_getgeo,
|
2018-10-12 18:08:49 +08:00
|
|
|
.report_zones = dm_blk_report_zones,
|
2015-10-15 20:10:51 +08:00
|
|
|
.pr_ops = &dm_pr_ops,
|
2005-04-17 06:20:36 +08:00
|
|
|
.owner = THIS_MODULE
|
|
|
|
};
|
|
|
|
|
2017-04-13 03:35:44 +08:00
|
|
|
static const struct dax_operations dm_dax_ops = {
|
|
|
|
.direct_access = dm_dax_direct_access,
|
2019-05-17 04:26:29 +08:00
|
|
|
.dax_supported = dm_dax_supported,
|
2017-05-30 03:57:56 +08:00
|
|
|
.copy_from_iter = dm_dax_copy_from_iter,
|
2018-05-02 21:46:33 +08:00
|
|
|
.copy_to_iter = dm_dax_copy_to_iter,
|
2017-04-13 03:35:44 +08:00
|
|
|
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* module hooks
|
|
|
|
*/
|
|
|
|
module_init(dm_init);
|
|
|
|
module_exit(dm_exit);
|
|
|
|
|
|
|
|
module_param(major, uint, 0);
|
|
|
|
MODULE_PARM_DESC(major, "The major number of the device mapper");
|
2013-09-13 06:06:12 +08:00
|
|
|
|
2013-09-13 06:06:12 +08:00
|
|
|
module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
|
|
|
|
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
|
|
|
|
|
2016-02-23 01:16:21 +08:00
|
|
|
module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
|
|
|
|
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
|
|
|
|
|
2024-06-11 20:26:44 +08:00
|
|
|
module_param(swap_bios, int, S_IRUGO | S_IWUSR);
|
|
|
|
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
MODULE_DESCRIPTION(DM_NAME " driver");
|
|
|
|
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
|
|
|
|
MODULE_LICENSE("GPL");
|