Merge branch 'for-3.18/drivers' of git://git.kernel.dk/linux-block
Pull block layer driver update from Jens Axboe:
 "This is the block driver pull request for 3.18. Not a lot in there
  this round, and nothing earth shattering.

   - A round of drbd fixes from the linbit team, and an improvement in
     asender performance.

   - Removal of deprecated (and unused) IRQF_DISABLED flag in rsxx and
     hd from Michael Opdenacker.

   - Disable entropy collection from flash devices by default, from
     Mike Snitzer.

   - A small collection of xen blkfront/back fixes from Roger Pau Monné
     and Vitaly Kuznetsov"

* 'for-3.18/drivers' of git://git.kernel.dk/linux-block:
  block: disable entropy contributions for nonrot devices
  xen, blkfront: factor out flush-related checks from do_blkif_request()
  xen-blkback: fix leak on grant map error path
  xen/blkback: unmap all persistent grants when frontend gets disconnected
  rsxx: Remove deprecated IRQF_DISABLED
  block: hd: remove deprecated IRQF_DISABLED
  drbd: use RB_DECLARE_CALLBACKS() to define augment callbacks
  drbd: compute the end before rb_insert_augmented()
  drbd: Add missing newline in resync progress display in /proc/drbd
  drbd: reduce lock contention in drbd_worker
  drbd: Improve asender performance
  drbd: Get rid of the WORK_PENDING macro
  drbd: Get rid of the __no_warn and __cond_lock macros
  drbd: Avoid inconsistent locking warning
  drbd: Remove superfluous newline from "resync_extents" debugfs entry.
  drbd: Use consistent names for all the bi_end_io callbacks
  drbd: Use better variable names
commit e75437fb93
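For orientation before the diff: the entropy change in this pull repeats one small pattern in every flash/SSD-backed driver's queue setup, i.e. the queue is marked non-rotational and, new in this series, dropped from the entropy pool by clearing QUEUE_FLAG_ADD_RANDOM. A minimal sketch of that pattern (the helper name and the bare request_queue pointer are illustrative, not taken from the series):

    /* Sketch only: mark a flash-backed queue non-rotational and stop it from
     * feeding the random pool, as each driver touched below now does. */
    static void example_mark_queue_nonrot(struct request_queue *q)
    {
            queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
            queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
    }

Rotational disks keep contributing entropy; sd.c only clears the flag when the device reports a non-rotational medium, as the last hunk below shows.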
@@ -158,14 +158,14 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 if (bio_add_page(bio, device->md_io.page, size, 0) != size)
 goto out;
 bio->bi_private = device;
-bio->bi_end_io = drbd_md_io_complete;
+bio->bi_end_io = drbd_md_endio;
 bio->bi_rw = rw;
 
 if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
 /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
 ;
 else if (!get_ldev_if_state(device, D_ATTACHING)) {
-/* Corresponding put_ldev in drbd_md_io_complete() */
+/* Corresponding put_ldev in drbd_md_endio() */
 drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
 err = -ENODEV;
 goto out;

@@ -941,7 +941,7 @@ static void drbd_bm_aio_ctx_destroy(struct kref *kref)
 }
 
 /* bv_page may be a copy, or may be the original */
-static void bm_async_io_complete(struct bio *bio, int error)
+static void drbd_bm_endio(struct bio *bio, int error)
 {
 struct drbd_bm_aio_ctx *ctx = bio->bi_private;
 struct drbd_device *device = ctx->device;

@@ -1027,7 +1027,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
 * according to api. Do we want to assert that? */
 bio_add_page(bio, page, len, 0);
 bio->bi_private = ctx;
-bio->bi_end_io = bm_async_io_complete;
+bio->bi_end_io = drbd_bm_endio;
 
 if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
 bio->bi_rw |= rw;

@@ -1125,7 +1125,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
 }
 
 /*
-* We initialize ctx->in_flight to one to make sure bm_async_io_complete
+* We initialize ctx->in_flight to one to make sure drbd_bm_endio
 * will not set ctx->done early, and decrement / test it here. If there
 * are still some bios in flight, we need to wait for them here.
 * If all IO is done already (or nothing had been submitted), there is

@@ -695,7 +695,7 @@ static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
 {
 struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);
 
-seq_printf(m, "%5d %s %s %s\n", bme->rs_left,
+seq_printf(m, "%5d %s %s %s", bme->rs_left,
 test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
 test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
 test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"

@@ -61,8 +61,6 @@
 # define __must_hold(x)
 #endif
 
-#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)
-
 /* module parameter, defined in drbd_main.c */
 extern unsigned int minor_count;
 extern bool disable_sendpage;

@@ -1483,7 +1481,7 @@ extern int drbd_khelper(struct drbd_device *device, char *cmd);
 
 /* drbd_worker.c */
 /* bi_end_io handlers */
-extern void drbd_md_io_complete(struct bio *bio, int error);
+extern void drbd_md_endio(struct bio *bio, int error);
 extern void drbd_peer_request_endio(struct bio *bio, int error);
 extern void drbd_request_endio(struct bio *bio, int error);
 extern int drbd_worker(struct drbd_thread *thi);
@@ -2100,16 +2098,19 @@ static inline bool is_sync_state(enum drbd_conns connection_state)
 
 /**
 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
-* @M: DRBD device.
+* @_device: DRBD device.
+* @_min_state: Minimum device state required for success.
 *
 * You have to call put_ldev() when finished working with device->ldev.
 */
-#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
-#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
+#define get_ldev_if_state(_device, _min_state) \
+(_get_ldev_if_state((_device), (_min_state)) ? \
+({ __acquire(x); true; }) : false)
+#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
 
 static inline void put_ldev(struct drbd_device *device)
 {
-enum drbd_disk_state ds = device->state.disk;
+enum drbd_disk_state disk_state = device->state.disk;
 /* We must check the state *before* the atomic_dec becomes visible,
 * or we have a theoretical race where someone hitting zero,
 * while state still D_FAILED, will then see D_DISKLESS in the

@@ -2122,10 +2123,10 @@ static inline void put_ldev(struct drbd_device *device)
 __release(local);
 D_ASSERT(device, i >= 0);
 if (i == 0) {
-if (ds == D_DISKLESS)
+if (disk_state == D_DISKLESS)
 /* even internal references gone, safe to destroy */
 drbd_device_post_work(device, DESTROY_DISK);
-if (ds == D_FAILED)
+if (disk_state == D_FAILED)
 /* all application IO references gone. */
 if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
 drbd_device_post_work(device, GO_DISKLESS);
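The reworked get_ldev()/get_ldev_if_state() macros keep the same contract as before: a successful call takes a reference on device->ldev that must be dropped with put_ldev(), and the __acquire()/__release(local) annotations exist only so sparse can check that pairing without the old __cond_lock() wrapper. A hypothetical caller, just to show the intended shape (not taken from the patch):

    /* Sketch: every successful get_ldev_if_state() must be paired with put_ldev(). */
    static bool example_have_local_disk(struct drbd_device *device)
    {
            bool have_disk = false;

            if (get_ldev_if_state(device, D_ATTACHING)) {
                    have_disk = device->ldev != NULL;
                    put_ldev(device);
            }
            return have_disk;
    }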
@@ -37,40 +37,8 @@ compute_subtree_last(struct drbd_interval *node)
 return max;
 }
 
-static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
-{
-while (rb != stop) {
-struct drbd_interval *node = rb_entry(rb, struct drbd_interval, rb);
-sector_t subtree_last = compute_subtree_last(node);
-if (node->end == subtree_last)
-break;
-node->end = subtree_last;
-rb = rb_parent(&node->rb);
-}
-}
-
-static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
-{
-struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
-struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
-
-new->end = old->end;
-}
-
-static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
-{
-struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
-struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
-
-new->end = old->end;
-old->end = compute_subtree_last(old);
-}
-
-static const struct rb_augment_callbacks augment_callbacks = {
-augment_propagate,
-augment_copy,
-augment_rotate,
-};
+RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb,
+sector_t, end, compute_subtree_last);
 
 /**
 * drbd_insert_interval - insert a new interval into a tree

@@ -79,6 +47,7 @@ bool
 drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
 {
 struct rb_node **new = &root->rb_node, *parent = NULL;
+sector_t this_end = this->sector + (this->size >> 9);
 
 BUG_ON(!IS_ALIGNED(this->size, 512));
 

@@ -87,6 +56,8 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
 rb_entry(*new, struct drbd_interval, rb);
 
 parent = *new;
+if (here->end < this_end)
+here->end = this_end;
 if (this->sector < here->sector)
 new = &(*new)->rb_left;
 else if (this->sector > here->sector)

@@ -99,6 +70,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
 return false;
 }
 
+this->end = this_end;
 rb_link_node(&this->rb, parent, new);
 rb_insert_augmented(&this->rb, root, &augment_callbacks);
 return true;
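RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb, sector_t, end, compute_subtree_last) generates the same propagate/copy/rotate trio that was open-coded above, keeping the cached end field (the subtree maximum) consistent. The callbacks matter on removal as much as on insertion; a rough sketch of the erase side, mirroring the rb_insert_augmented() call in the last hunk (drbd's actual removal helper may differ in detail):

    /* Sketch: erase goes through the augmented variant so the cached subtree
     * maxima are fixed up on the way back to the root. */
    static void example_remove_interval(struct rb_root *root, struct drbd_interval *this)
    {
            rb_erase_augmented(&this->rb, root, &augment_callbacks);
    }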
@@ -1622,13 +1622,13 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
 struct drbd_socket *sock;
 struct p_data *p;
 unsigned int dp_flags = 0;
-int dgs;
+int digest_size;
 int err;
 
 sock = &peer_device->connection->data;
 p = drbd_prepare_command(peer_device, sock);
-dgs = peer_device->connection->integrity_tfm ?
-crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+digest_size = peer_device->connection->integrity_tfm ?
+crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
 
 if (!p)
 return -EIO;

@@ -1659,9 +1659,9 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
 
 /* our digest is still only over the payload.
 * TRIM does not carry any payload. */
-if (dgs)
+if (digest_size)
 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
-err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + digest_size, NULL, req->i.size);
 if (!err) {
 /* For protocol A, we have to memcpy the payload into
 * socket buffers, as we may complete right away

@@ -1674,23 +1674,23 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
 * out ok after sending on this side, but does not fit on the
 * receiving side, we sure have detected corruption elsewhere.
 */
-if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
+if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
 err = _drbd_send_bio(peer_device, req->master_bio);
 else
 err = _drbd_send_zc_bio(peer_device, req->master_bio);
 
 /* double check digest, sometimes buffers have been modified in flight. */
-if (dgs > 0 && dgs <= 64) {
+if (digest_size > 0 && digest_size <= 64) {
 /* 64 byte, 512 bit, is the largest digest size
 * currently supported in kernel crypto. */
 unsigned char digest[64];
 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
-if (memcmp(p + 1, digest, dgs)) {
+if (memcmp(p + 1, digest, digest_size)) {
 drbd_warn(device,
 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
 (unsigned long long)req->i.sector, req->i.size);
 }
-} /* else if (dgs > 64) {
+} /* else if (digest_size > 64) {
 ... Be noisy about digest too large ...
 } */
 }

@@ -1711,13 +1711,13 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
 struct drbd_socket *sock;
 struct p_data *p;
 int err;
-int dgs;
+int digest_size;
 
 sock = &peer_device->connection->data;
 p = drbd_prepare_command(peer_device, sock);
 
-dgs = peer_device->connection->integrity_tfm ?
-crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+digest_size = peer_device->connection->integrity_tfm ?
+crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
 
 if (!p)
 return -EIO;

@@ -1725,9 +1725,9 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
 p->block_id = peer_req->block_id;
 p->seq_num = 0; /* unused */
 p->dp_flags = 0;
-if (dgs)
+if (digest_size)
 drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
-err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
 if (!err)
 err = _drbd_send_zc_ee(peer_device, peer_req);
 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */

@@ -142,10 +142,12 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
 (unsigned long) Bit2KB(rs_left >> 10),
 (unsigned long) Bit2KB(rs_total >> 10));
 else
-seq_printf(seq, "(%lu/%lu)K\n\t",
+seq_printf(seq, "(%lu/%lu)K",
 (unsigned long) Bit2KB(rs_left),
 (unsigned long) Bit2KB(rs_total));
 
+seq_printf(seq, "\n\t");
+
 /* see drivers/md/md.c
 * We do not want to overflow, so the order of operands and
 * the * 100 / 100 trick are important. We do a +1 to be

@@ -1371,9 +1371,9 @@ int drbd_submit_peer_request(struct drbd_device *device,
 struct bio *bio;
 struct page *page = peer_req->pages;
 sector_t sector = peer_req->i.sector;
-unsigned ds = peer_req->i.size;
+unsigned data_size = peer_req->i.size;
 unsigned n_bios = 0;
-unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 int err = -ENOMEM;
 
 if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) {

@@ -1388,7 +1388,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
 list_add_tail(&peer_req->w.list, &device->active_ee);
 spin_unlock_irq(&device->resource->req_lock);
 if (blkdev_issue_zeroout(device->ldev->backing_bdev,
-sector, ds >> 9, GFP_NOIO))
+sector, data_size >> 9, GFP_NOIO))
 peer_req->flags |= EE_WAS_ERROR;
 drbd_endio_write_sec_final(peer_req);
 return 0;

@@ -1426,12 +1426,12 @@ next_bio:
 ++n_bios;
 
 if (rw & REQ_DISCARD) {
-bio->bi_iter.bi_size = ds;
+bio->bi_iter.bi_size = data_size;
 goto submit;
 }
 
 page_chain_for_each(page) {
-unsigned len = min_t(unsigned, ds, PAGE_SIZE);
+unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
 if (!bio_add_page(bio, page, len, 0)) {
 /* A single page must always be possible!
 * But in case it fails anyways,

@@ -1446,11 +1446,11 @@ next_bio:
 }
 goto next_bio;
 }
-ds -= len;
+data_size -= len;
 sector += len >> 9;
 --nr_pages;
 }
-D_ASSERT(device, ds == 0);
+D_ASSERT(device, data_size == 0);
 submit:
 D_ASSERT(device, page == NULL);
 

@@ -1591,24 +1591,24 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
 const sector_t capacity = drbd_get_capacity(device->this_bdev);
 struct drbd_peer_request *peer_req;
 struct page *page;
-int dgs, ds, err;
-unsigned int data_size = pi->size;
+int digest_size, err;
+unsigned int data_size = pi->size, ds;
 void *dig_in = peer_device->connection->int_dig_in;
 void *dig_vv = peer_device->connection->int_dig_vv;
 unsigned long *data;
 struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
 
-dgs = 0;
+digest_size = 0;
 if (!trim && peer_device->connection->peer_integrity_tfm) {
-dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
 /*
 * FIXME: Receive the incoming digest into the receive buffer
 * here, together with its struct p_data?
 */
-err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
+err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
 if (err)
 return NULL;
-data_size -= dgs;
+data_size -= digest_size;
 }
 
 if (trim) {

@@ -1661,16 +1661,16 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
 ds -= len;
 }
 
-if (dgs) {
+if (digest_size) {
 drbd_csum_ee(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv);
-if (memcmp(dig_in, dig_vv, dgs)) {
+if (memcmp(dig_in, dig_vv, digest_size)) {
 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
 (unsigned long long)sector, data_size);
 drbd_free_peer_req(device, peer_req);
 return NULL;
 }
 }
-device->recv_cnt += data_size>>9;
+device->recv_cnt += data_size >> 9;
 return peer_req;
 }
 

@@ -1708,17 +1708,17 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
 struct bio_vec bvec;
 struct bvec_iter iter;
 struct bio *bio;
-int dgs, err, expect;
+int digest_size, err, expect;
 void *dig_in = peer_device->connection->int_dig_in;
 void *dig_vv = peer_device->connection->int_dig_vv;
 
-dgs = 0;
+digest_size = 0;
 if (peer_device->connection->peer_integrity_tfm) {
-dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
-err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
+digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
 if (err)
 return err;
-data_size -= dgs;
+data_size -= digest_size;
 }
 
 /* optimistically update recv_cnt. if receiving fails below,

@@ -1738,9 +1738,9 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
 data_size -= expect;
 }
 
-if (dgs) {
+if (digest_size) {
 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
-if (memcmp(dig_in, dig_vv, dgs)) {
+if (memcmp(dig_in, dig_vv, digest_size)) {
 drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
 return -EINVAL;
 }
@@ -5561,6 +5561,7 @@ int drbd_asender(struct drbd_thread *thi)
 * rv < expected: "woken" by signal during receive
 * rv == 0 : "connection shut down by peer"
 */
+received_more:
 if (likely(rv > 0)) {
 received += rv;
 buf += rv;

@@ -5636,6 +5637,11 @@ int drbd_asender(struct drbd_thread *thi)
 expect = header_size;
 cmd = NULL;
 }
+if (test_bit(SEND_PING, &connection->flags))
+continue;
+rv = drbd_recv_short(connection->meta.socket, buf, expect-received, MSG_DONTWAIT);
+if (rv > 0)
+goto received_more;
 }
 
 if (0) {
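The asender change above avoids a full round trip through the blocking receive path when more data is already queued: after finishing a packet it retries the socket with MSG_DONTWAIT and only falls back to waiting when that would block. A small userspace sketch of the same idea, using plain sockets rather than the drbd transport (names are illustrative):

    #include <sys/types.h>
    #include <sys/socket.h>

    /* Sketch: opportunistically drain readily available data before returning
     * to the (more expensive) blocking wait. */
    static size_t drain_nonblocking(int fd, char *buf, size_t have, size_t want)
    {
            while (have < want) {
                    ssize_t n = recv(fd, buf + have, want - have, MSG_DONTWAIT);
                    if (n <= 0)
                            break;          /* would block, EOF, or error */
                    have += (size_t)n;      /* the "goto received_more" case above */
            }
            return have;
    }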
@@ -1645,6 +1645,6 @@ void request_timer_fn(unsigned long data)
 ? oldest_submit_jif + dt : now + et;
 nt = time_before(ent, dt) ? ent : dt;
 out:
-spin_unlock_irq(&connection->resource->req_lock);
+spin_unlock_irq(&device->resource->req_lock);
 mod_timer(&device->request_timer, nt);
 }

@@ -136,50 +136,50 @@ enum drbd_role conn_highest_peer(struct drbd_connection *connection)
 
 enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
 {
-enum drbd_disk_state ds = D_DISKLESS;
+enum drbd_disk_state disk_state = D_DISKLESS;
 struct drbd_peer_device *peer_device;
 int vnr;
 
 rcu_read_lock();
 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 struct drbd_device *device = peer_device->device;
-ds = max_t(enum drbd_disk_state, ds, device->state.disk);
+disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk);
 }
 rcu_read_unlock();
 
-return ds;
+return disk_state;
 }
 
 enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
 {
-enum drbd_disk_state ds = D_MASK;
+enum drbd_disk_state disk_state = D_MASK;
 struct drbd_peer_device *peer_device;
 int vnr;
 
 rcu_read_lock();
 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 struct drbd_device *device = peer_device->device;
-ds = min_t(enum drbd_disk_state, ds, device->state.disk);
+disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
 }
 rcu_read_unlock();
 
-return ds;
+return disk_state;
 }
 
 enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
 {
-enum drbd_disk_state ds = D_DISKLESS;
+enum drbd_disk_state disk_state = D_DISKLESS;
 struct drbd_peer_device *peer_device;
 int vnr;
 
 rcu_read_lock();
 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 struct drbd_device *device = peer_device->device;
-ds = max_t(enum drbd_disk_state, ds, device->state.pdsk);
+disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk);
 }
 rcu_read_unlock();
 
-return ds;
+return disk_state;
 }
 
 enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)

@@ -43,10 +43,10 @@ static int make_ov_request(struct drbd_device *, int);
 static int make_resync_request(struct drbd_device *, int);
 
 /* endio handlers:
-* drbd_md_io_complete (defined here)
+* drbd_md_endio (defined here)
 * drbd_request_endio (defined here)
 * drbd_peer_request_endio (defined here)
-* bm_async_io_complete (defined in drbd_bitmap.c)
+* drbd_bm_endio (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,

@@ -65,7 +65,7 @@ rwlock_t global_state_lock;
 /* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
-void drbd_md_io_complete(struct bio *bio, int error)
+void drbd_md_endio(struct bio *bio, int error)
 {
 struct drbd_device *device;
 

@@ -1853,9 +1853,12 @@ static void drbd_ldev_destroy(struct drbd_device *device)
 device->resync = NULL;
 lc_destroy(device->act_log);
 device->act_log = NULL;
-__no_warn(local,
-drbd_free_ldev(device->ldev);
-device->ldev = NULL;);
+
+__acquire(local);
+drbd_free_ldev(device->ldev);
+device->ldev = NULL;
+__release(local);
+
 clear_bit(GOING_DISKLESS, &device->flags);
 wake_up(&device->misc_wait);
 }
@@ -1928,19 +1931,18 @@ void __update_timing_details(
 ++(*cb_nr);
 }
 
-#define WORK_PENDING(work_bit, todo) (todo & (1UL << work_bit))
 static void do_device_work(struct drbd_device *device, const unsigned long todo)
 {
-if (WORK_PENDING(MD_SYNC, todo))
+if (test_bit(MD_SYNC, &todo))
 do_md_sync(device);
-if (WORK_PENDING(RS_DONE, todo) ||
-WORK_PENDING(RS_PROGRESS, todo))
-update_on_disk_bitmap(device, WORK_PENDING(RS_DONE, todo));
-if (WORK_PENDING(GO_DISKLESS, todo))
+if (test_bit(RS_DONE, &todo) ||
+test_bit(RS_PROGRESS, &todo))
+update_on_disk_bitmap(device, test_bit(RS_DONE, &todo));
+if (test_bit(GO_DISKLESS, &todo))
 go_diskless(device);
-if (WORK_PENDING(DESTROY_DISK, todo))
+if (test_bit(DESTROY_DISK, &todo))
 drbd_ldev_destroy(device);
-if (WORK_PENDING(RS_START, todo))
+if (test_bit(RS_START, &todo))
 do_start_resync(device);
 }
 
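Dropping WORK_PENDING() trades an open-coded mask test for test_bit(); for a single unsigned long of pending-work bits the two forms agree, test_bit() simply takes the word's address. A kernel-style sketch of that equivalence (the WARN_ON is only there to state the invariant, it is not in the patch):

    /* Sketch: old WORK_PENDING(bit, todo) versus the new test_bit(bit, &todo). */
    static bool example_work_pending(unsigned long todo, int work_bit)
    {
            bool old_style = todo & (1UL << work_bit);
            bool new_style = test_bit(work_bit, &todo);

            WARN_ON(old_style != new_style);
            return new_style;
    }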
@@ -1992,22 +1994,13 @@ static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *
 return !list_empty(work_list);
 }
 
-static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
-{
-spin_lock_irq(&queue->q_lock);
-if (!list_empty(&queue->q))
-list_move(queue->q.next, work_list);
-spin_unlock_irq(&queue->q_lock);
-return !list_empty(work_list);
-}
-
 static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
 {
 DEFINE_WAIT(wait);
 struct net_conf *nc;
 int uncork, cork;
 
-dequeue_work_item(&connection->sender_work, work_list);
+dequeue_work_batch(&connection->sender_work, work_list);
 if (!list_empty(work_list))
 return;
 

@@ -2033,8 +2026,6 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
 prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
 spin_lock_irq(&connection->resource->req_lock);
 spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
-/* dequeue single item only,
-* we still use drbd_queue_work_front() in some places */
 if (!list_empty(&connection->sender_work.q))
 list_splice_tail_init(&connection->sender_work.q, work_list);
 spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */

@@ -2121,7 +2112,7 @@ int drbd_worker(struct drbd_thread *thi)
 if (get_t_state(thi) != RUNNING)
 break;
 
-while (!list_empty(&work_list)) {
+if (!list_empty(&work_list)) {
 w = list_first_entry(&work_list, struct drbd_work, list);
 list_del_init(&w->list);
 update_worker_timing_details(connection, w->cb);

@@ -2137,13 +2128,13 @@ int drbd_worker(struct drbd_thread *thi)
 update_worker_timing_details(connection, do_unqueued_work);
 do_unqueued_work(connection);
 }
-while (!list_empty(&work_list)) {
+if (!list_empty(&work_list)) {
 w = list_first_entry(&work_list, struct drbd_work, list);
 list_del_init(&w->list);
 update_worker_timing_details(connection, w->cb);
 w->cb(w, 1);
-}
-dequeue_work_batch(&connection->sender_work, &work_list);
+} else
+dequeue_work_batch(&connection->sender_work, &work_list);
 } while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags));
 
 rcu_read_lock();
@@ -694,16 +694,6 @@ static const struct block_device_operations hd_fops = {
 .getgeo = hd_getgeo,
 };
 
-/*
-* This is the hard disk IRQ description. The IRQF_DISABLED in sa_flags
-* means we run the IRQ-handler with interrupts disabled: this is bad for
-* interrupt latency, but anything else has led to problems on some
-* machines.
-*
-* We enable interrupts in some of the routines after making sure it's
-* safe.
-*/
-
 static int __init hd_init(void)
 {
 int drive;

@@ -761,7 +751,7 @@ static int __init hd_init(void)
 p->cyl, p->head, p->sect);
 }
 
-if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
+if (request_irq(HD_IRQ, hd_interrupt, 0, "hd", NULL)) {
 printk("hd: unable to get IRQ%d for the hard disk driver\n",
 HD_IRQ);
 goto out1;

@@ -3954,6 +3954,7 @@ skip_create_disk:
 
 /* Set device limits. */
 set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
+clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags);
 blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
 blk_queue_physical_block_size(dd->queue, 4096);
 blk_queue_max_hw_sectors(dd->queue, 0xffff);

@@ -847,6 +847,7 @@ static int __init nbd_init(void)
 * Tell the block layer that we are not a rotational device
 */
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 disk->queue->limits.discard_granularity = 512;
 disk->queue->limits.max_discard_sectors = UINT_MAX;
 disk->queue->limits.discard_zeroes_data = 0;

@@ -521,6 +521,7 @@ static int null_add_dev(void)
 
 nullb->q->queuedata = nullb;
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
 disk = nullb->disk = alloc_disk_node(1, home_node);
 if (!disk) {

@@ -1916,6 +1916,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, ns->queue);
 blk_queue_make_request(ns->queue, nvme_make_request);
 ns->dev = dev;
 ns->queue->queuedata = ns;

@@ -837,7 +837,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 "Failed to enable MSI\n");
 }
 
-st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
+st = request_irq(dev->irq, rsxx_isr, IRQF_SHARED,
 DRIVER_NAME, card);
 if (st) {
 dev_err(CARD_TO_DEV(card),

@@ -307,6 +307,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
 if (rsxx_discard_supported(card)) {
 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
 blk_queue_max_discard_sectors(card->queue,

@@ -4426,6 +4426,7 @@ static int skd_cons_disk(struct skd_device *skdev)
 q->limits.discard_zeroes_data = 1;
 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 
 spin_lock_irqsave(&skdev->lock, flags);
 pr_debug("%s:%s:%d stopping %s queue\n",

@@ -763,6 +763,7 @@ again:
 BUG_ON(new_map_idx >= segs_to_map);
 if (unlikely(map[new_map_idx].status != 0)) {
 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+put_free_pages(blkif, &pages[seg_idx]->page, 1);
 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 ret |= 1;
 goto next;
@@ -270,6 +270,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 blkif->blk_rings.common.sring = NULL;
 }
 
+/* Remove all persistent grants and the cache of ballooned pages. */
+xen_blkbk_free_caches(blkif);
+
 return 0;
 }
 

@@ -281,9 +284,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 xen_blkif_disconnect(blkif);
 xen_vbd_free(&blkif->vbd);
 
-/* Remove all persistent grants and the cache of ballooned pages. */
-xen_blkbk_free_caches(blkif);
-
 /* Make sure everything is drained before shutting down */
 BUG_ON(blkif->persistent_gnt_c != 0);
 BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);

@@ -582,6 +582,14 @@ static inline void flush_requests(struct blkfront_info *info)
 notify_remote_via_irq(info->irq);
 }
 
+static inline bool blkif_request_flush_valid(struct request *req,
+struct blkfront_info *info)
+{
+return ((req->cmd_type != REQ_TYPE_FS) ||
+((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
+!info->flush_op));
+}
+
 /*
 * do_blkif_request
 * read a block; request is in a request queue

@@ -604,9 +612,7 @@ static void do_blkif_request(struct request_queue *rq)
 
 blk_start_request(req);
 
-if ((req->cmd_type != REQ_TYPE_FS) ||
-((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
-!info->flush_op)) {
+if (blkif_request_flush_valid(req, info)) {
 __blk_end_request_all(req, -EIO);
 continue;
 }

@@ -1031,6 +1031,7 @@ static int create_device(struct zram *zram, int device_id)
 set_capacity(zram->disk, 0);
 /* zram devices sort of resembles non-rotational disks */
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 /*
 * To ensure that we always get PAGE_SIZE aligned
 * and n*PAGE_SIZED sized I/O requests.

@@ -685,8 +685,10 @@ static void ide_disk_setup(ide_drive_t *drive)
 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
 queue_max_sectors(q) / 2);
 
-if (ata_id_is_ssd(id))
+if (ata_id_is_ssd(id)) {
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+}
 
 /* calculate drive capacity, and select LBA if possible */
 ide_disk_get_capacity(drive);

@@ -842,6 +842,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 q->limits.logical_block_size = block_size;
 q->limits.physical_block_size = block_size;
 set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
+clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
 set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
 
 blk_queue_flush(q, REQ_FLUSH|REQ_FUA);

@@ -210,6 +210,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 blk_queue_prep_rq(mq->queue, mmc_prep_request);
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
 if (mmc_can_erase(card))
 mmc_queue_setup_discard(mq->queue, card);
 

@@ -417,6 +417,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 blk_queue_logical_block_size(new->rq, tr->blksize);
 
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
 
 if (tr->discard) {
 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);

@@ -386,6 +386,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
 blk_queue_max_segments(rq, nr_max_blk);
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
 scm_blk_dev_cluster_setup(bdev);
 
 bdev->gendisk = alloc_disk(SCM_NR_PARTS);

@@ -346,6 +346,7 @@ static int __init xpram_setup_blkdev(void)
 goto out;
 }
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
 blk_queue_make_request(xpram_queues[i], xpram_make_request);
 blk_queue_logical_block_size(xpram_queues[i], 4096);
 }

@@ -2677,8 +2677,10 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 
 rot = get_unaligned_be16(&buffer[4]);
 
-if (rot == 1)
+if (rot == 1) {
 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+}
 
 out:
 kfree(buffer);