/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;
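
	/*
	 * Illustrative sketch (not part of this header) of the intended
	 * access pattern: use the dm_get_live_table()/dm_put_live_table()
	 * helpers instead of dereferencing 'map' directly:
	 *
	 *	int srcu_idx;
	 *	struct dm_table *t = dm_get_live_table(md, &srcu_idx);
	 *
	 *	if (t)
	 *		... walk or query the live table ...
	 *	dm_put_live_table(md, srcu_idx);
	 */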

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	/*
	 * Throttling of in-flight bios for targets that set limit_swap_bios,
	 * used to bound the amount of swap I/O queued against the device.
	 */
	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};

/*
 * Some storage advertises discard support (e.g. WRITE SAME (16) with unmap)
 * but then rejects the discards actually sent to it.  Rather than letting
 * such requests repeatedly fail (and, on multipath, fail every path in
 * turn), DM core disables the offending operation for the whole mapped
 * device once the underlying device has disabled it.
 */
void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
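
/*
 * Sketch of a caller (assumed, modelled on DM core's bio completion path;
 * not a declaration in this header): when a discard fails with
 * BLK_STS_TARGET and the underlying queue no longer advertises discard
 * support, the capability is turned off for the whole mapped device:
 *
 *	if (bio_op(bio) == REQ_OP_DISCARD &&
 *	    !q->limits.max_discard_sectors)
 *		disable_discard(md);
 */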

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
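
/*
 * Sketch of the intended use (assumed, mirroring the DM sysfs code): the
 * kobject release handler signals the embedded completion so teardown can
 * wait until the last sysfs reference is dropped before freeing the
 * mapped_device:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */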

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
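
/*
 * Example use (a sketch; the parameter and wrapper names below are
 * illustrative, modelled on the bio-based IO reservation in dm.c):
 *
 *	unsigned dm_get_reserved_bio_based_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_bio_based_ios,
 *					     RESERVED_BIO_BASED_IOS,
 *					     DM_RESERVED_MAX_IOS);
 *	}
 */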

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
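
/*
 * Typical caller pattern (a sketch, assuming the usual "append into result,
 * then check" convention of target status/message handlers):
 *
 *	sz += snprintf(result + sz, maxlen - sz, " key=%u", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		... stop emitting further output ...
 */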

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);
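
/*
 * Issuing a global event boils down to (assumed, based on the dm.c
 * implementation) bumping the counter and waking any waiters, e.g.
 * userspace polling the DM control device:
 *
 *	atomic_inc(&dm_global_event_nr);
 *	wake_up(&dm_global_eventq);
 */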

#endif