dm integrity: fix flush with external metadata device
With an external metadata device, flush requests are not passed down to the data device.

Fix this by submitting the flush request in dm_integrity_flush_buffers. In order not to degrade performance, we overlap the data device flush with the metadata device flush.

Reported-by: Lukas Straub <lukasstraub2@web.de>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 0378c625af
commit 9b5948267a
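
The core of the change is the overlap the message describes: start an asynchronous, zero-length REQ_PREFLUSH write to the data device through dm-io, write out the dirty metadata buffers while that flush is in flight, and only then wait for it. Below is a minimal sketch of that pattern, condensed from the new dm_integrity_flush_buffers() in the diff; struct my_target, flush_done() and flush_metadata_and_data() are illustrative names only, while the dm-io and dm-bufio calls are the ones the patch itself uses.

/*
 * Sketch only: overlap a data-device cache flush with metadata writeback.
 * "struct my_target" and the function names are hypothetical; the dm-io and
 * dm-bufio calls mirror the new dm_integrity_flush_buffers() below.
 */
#include <linux/device-mapper.h>
#include <linux/completion.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>

struct my_target {
        struct dm_dev *data_dev;                /* the data device */
        struct dm_bufio_client *bufio;          /* caches metadata on the metadata device */
};

static void flush_done(unsigned long error, void *context)
{
        complete(context);                      /* data-device flush finished */
}

static void flush_metadata_and_data(struct my_target *t)
{
        int r;
        struct completion comp;
        struct dm_io_region region = {
                .bdev = t->data_dev->bdev,      /* the data device, not the metadata device */
                .sector = 0,
                .count = 0,                     /* zero-length write: flush the write cache only */
        };
        struct dm_io_request req = {
                .bi_op = REQ_OP_WRITE,
                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .notify.fn = flush_done,        /* asynchronous: completion reported via callback */
                .notify.context = &comp,
                .client = dm_bufio_get_dm_io_client(t->bufio),
        };

        init_completion(&comp);
        BUG_ON(dm_io(&req, 1, &region, NULL));  /* 1. kick off the data-device flush */
        r = dm_bufio_write_dirty_buffers(t->bufio); /* 2. flush metadata buffers in parallel */
        if (r)
                pr_err("metadata writeback failed: %d\n", r);
        wait_for_completion(&comp);             /* 3. only now wait for the data-device flush */
}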

drivers/md/dm-bufio.c
@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
 
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
+{
+        return c->dm_io;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
+
 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
 {
         return b->block;

drivers/md/dm-integrity.c
@@ -1379,12 +1379,52 @@ thorough_test:
 #undef MAY_BE_HASH
 }
 
-static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
+struct flush_request {
+        struct dm_io_request io_req;
+        struct dm_io_region io_reg;
+        struct dm_integrity_c *ic;
+        struct completion comp;
+};
+
+static void flush_notify(unsigned long error, void *fr_)
+{
+        struct flush_request *fr = fr_;
+        if (unlikely(error != 0))
+                dm_integrity_io_error(fr->ic, "flusing disk cache", -EIO);
+        complete(&fr->comp);
+}
+
+static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
 {
         int r;
+
+        struct flush_request fr;
+
+        if (!ic->meta_dev)
+                flush_data = false;
+        if (flush_data) {
+                fr.io_req.bi_op = REQ_OP_WRITE,
+                fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+                fr.io_req.mem.type = DM_IO_KMEM,
+                fr.io_req.mem.ptr.addr = NULL,
+                fr.io_req.notify.fn = flush_notify,
+                fr.io_req.notify.context = &fr;
+                fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
+                fr.io_reg.bdev = ic->dev->bdev,
+                fr.io_reg.sector = 0,
+                fr.io_reg.count = 0,
+                fr.ic = ic;
+                init_completion(&fr.comp);
+                r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+                BUG_ON(r);
+        }
+
         r = dm_bufio_write_dirty_buffers(ic->bufio);
         if (unlikely(r))
                 dm_integrity_io_error(ic, "writing tags", r);
+
+        if (flush_data)
+                wait_for_completion(&fr.comp);
 }
 
 static void sleep_on_endio_wait(struct dm_integrity_c *ic)
@@ -2110,7 +2150,7 @@ offload_to_thread:
 
         if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
                 integrity_metadata(&dio->work);
-                dm_integrity_flush_buffers(ic);
+                dm_integrity_flush_buffers(ic, false);
 
                 dio->in_flight = (atomic_t)ATOMIC_INIT(1);
                 dio->completion = NULL;
@@ -2195,7 +2235,7 @@ static void integrity_commit(struct work_struct *w)
         flushes = bio_list_get(&ic->flush_bio_list);
         if (unlikely(ic->mode != 'J')) {
                 spin_unlock_irq(&ic->endio_wait.lock);
-                dm_integrity_flush_buffers(ic);
+                dm_integrity_flush_buffers(ic, true);
                 goto release_flush_bios;
         }
 
@@ -2409,7 +2449,7 @@ skip_io:
         complete_journal_op(&comp);
         wait_for_completion_io(&comp.comp);
 
-        dm_integrity_flush_buffers(ic);
+        dm_integrity_flush_buffers(ic, true);
 }
 
 static void integrity_writer(struct work_struct *w)
@@ -2451,7 +2491,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
 {
         int r;
 
-        dm_integrity_flush_buffers(ic);
+        dm_integrity_flush_buffers(ic, false);
         if (dm_integrity_failed(ic))
                 return;
 
@@ -2654,7 +2694,7 @@ static void bitmap_flush_work(struct work_struct *work)
         unsigned long limit;
         struct bio *bio;
 
-        dm_integrity_flush_buffers(ic);
+        dm_integrity_flush_buffers(ic, false);
 
         range.logical_sector = 0;
         range.n_sectors = ic->provided_data_sectors;
@@ -2663,9 +2703,7 @@ static void bitmap_flush_work(struct work_struct *work)
         add_new_range_and_wait(ic, &range);
         spin_unlock_irq(&ic->endio_wait.lock);
 
-        dm_integrity_flush_buffers(ic);
-        if (ic->meta_dev)
-                blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
+        dm_integrity_flush_buffers(ic, true);
 
         limit = ic->provided_data_sectors;
         if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
@@ -2934,11 +2972,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
                 if (ic->meta_dev)
                         queue_work(ic->writer_wq, &ic->writer_work);
                 drain_workqueue(ic->writer_wq);
-                dm_integrity_flush_buffers(ic);
+                dm_integrity_flush_buffers(ic, true);
         }
 
         if (ic->mode == 'B') {
-                dm_integrity_flush_buffers(ic);
+                dm_integrity_flush_buffers(ic, true);
 #if 1
                 /* set to 0 to test bitmap replay code */
                 init_journal(ic, 0, ic->journal_sections, 0);

include/linux/dm-bufio.h
@@ -150,6 +150,7 @@ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
 
 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
 sector_t dm_bufio_get_block_number(struct dm_buffer *b);
 void *dm_bufio_get_block_data(struct dm_buffer *b);
 void *dm_bufio_get_aux_data(struct dm_buffer *b);