drbd: Move conf_mutex from connection to resource

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
parent 3ab706fe52
commit 0500813fe0
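At a glance, the commit widens the scope of two synchronization objects: the conf_update mutex (the writer-side lock for the RCU-managed net_conf and disk_conf) and the req_lock spinlock move from struct drbd_connection into struct drbd_resource, and every user switches from reaching them via first_peer_device(device)->connection to the direct device->resource (or connection->resource) path. A minimal sketch of the before/after shape, with the structs abridged to only the fields this diff touches (the helper at the end is a hypothetical name, not part of the driver):

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Abridged sketch; everything not touched by this commit is elided. */
struct drbd_resource {
	struct mutex conf_update;	/* moved here: serializes read-copy-update of net_conf and disk_conf */
	spinlock_t req_lock;		/* moved here: guards request and state handling */
};

struct drbd_connection {
	struct drbd_resource *resource;	/* conf_update and req_lock lived in this struct before */
};

struct drbd_device {
	struct drbd_resource *resource;
};

/* Hypothetical helper illustrating the new access path; callers used to
 * write spin_lock_irq(&first_peer_device(device)->connection->req_lock). */
static inline void drbd_lock_requests(struct drbd_device *device)
{
	spin_lock_irq(&device->resource->req_lock);
}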
drivers/block/drbd/drbd_int.h

@@ -518,7 +518,7 @@ struct drbd_backing_dev {
 	struct block_device *backing_bdev;
 	struct block_device *md_bdev;
 	struct drbd_md md;
-	struct disk_conf *disk_conf;	/* RCU, for updates: first_peer_device(device)->connection->conf_update */
+	struct disk_conf *disk_conf;	/* RCU, for updates: resource->conf_update */
 	sector_t known_size;		/* last known size of that backing device */
 };
 
@@ -578,6 +578,8 @@ struct drbd_resource {
 	struct list_head connections;
 	struct list_head resources;
 	struct res_opts res_opts;
+	struct mutex conf_update;	/* mutex for ready-copy-update of net_conf and disk_conf */
+	spinlock_t req_lock;
 };
 
 struct drbd_connection {
@@ -594,7 +596,6 @@ struct drbd_connection {
 
 	unsigned long flags;
 	struct net_conf *net_conf;	/* content protected by rcu */
-	struct mutex conf_update;	/* mutex for ready-copy-update of net_conf and disk_conf */
 	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */
 
 	struct sockaddr_storage my_addr;
@@ -608,8 +609,6 @@ struct drbd_connection {
 	unsigned long last_received;	/* in jiffies, either socket */
 	unsigned int ko_count;
 
-	spinlock_t req_lock;
-
 	struct list_head transfer_log;	/* all requests not yet fully processed */
 
 	struct crypto_hash *cram_hmac_tfm;
@@ -1595,9 +1594,9 @@ static inline void drbd_chk_io_error_(struct drbd_device *device,
 {
 	if (error) {
 		unsigned long flags;
-		spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+		spin_lock_irqsave(&device->resource->req_lock, flags);
 		__drbd_chk_io_error_(device, forcedetach, where);
-		spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+		spin_unlock_irqrestore(&device->resource->req_lock, flags);
 	}
 }
 
@@ -2069,11 +2068,11 @@ static inline bool inc_ap_bio_cond(struct drbd_device *device)
 {
 	bool rv = false;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	rv = may_inc_ap_bio(device);
 	if (rv)
 		atomic_inc(&device->ap_bio_cnt);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	return rv;
 }
drivers/block/drbd/drbd_main.c

@@ -198,7 +198,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
 	int expect_epoch = 0;
 	int expect_size = 0;
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 
 	/* find oldest not yet barrier-acked write request,
 	 * count writes in its epoch. */
@@ -255,12 +255,12 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
 			break;
 		_req_mod(req, BARRIER_ACKED);
 	}
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 
 	return;
 
 bail:
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 }
 
@@ -284,9 +284,9 @@ void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 
 void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 {
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	_tl_restart(connection, what);
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 }
 
 /**
@@ -311,7 +311,7 @@ void tl_abort_disk_io(struct drbd_device *device)
 	struct drbd_connection *connection = first_peer_device(device)->connection;
 	struct drbd_request *req, *r;
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
 		if (!(req->rq_state & RQ_LOCAL_PENDING))
 			continue;
@@ -319,7 +319,7 @@ void tl_abort_disk_io(struct drbd_device *device)
 			continue;
 		_req_mod(req, ABORT_DISK_IO);
 	}
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 }
 
 static int drbd_thread_setup(void *arg)
@@ -1836,7 +1836,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 	int rv = 0;
 
 	mutex_lock(&drbd_main_mutex);
-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	/* to have a stable device->state.role
 	 * and no race with updating open_cnt */
 
@@ -1849,7 +1849,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 
 	if (!rv)
 		device->open_cnt++;
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);
 	mutex_unlock(&drbd_main_mutex);
 
 	return rv;
@@ -2546,6 +2546,8 @@ struct drbd_resource *drbd_create_resource(const char *name)
 	idr_init(&resource->devices);
 	INIT_LIST_HEAD(&resource->connections);
 	list_add_tail_rcu(&resource->resources, &drbd_resources);
+	mutex_init(&resource->conf_update);
+	spin_lock_init(&resource->req_lock);
 	return resource;
 }
 
@@ -2588,8 +2590,6 @@ struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
 
 	connection->cstate = C_STANDALONE;
 	mutex_init(&connection->cstate_mutex);
-	spin_lock_init(&connection->req_lock);
-	mutex_init(&connection->conf_update);
 	init_waitqueue_head(&connection->ping_wait);
 	idr_init(&connection->peer_devices);
 
@@ -2720,7 +2720,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
-	q->queue_lock = &connection->req_lock;
+	q->queue_lock = &resource->req_lock;
 
 	device->md_io_page = alloc_page(GFP_KERNEL);
 	if (!device->md_io_page)
@@ -3281,14 +3281,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
 
 	rv = NO_ERROR;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (device->state.conn < C_CONNECTED) {
 		unsigned int peer;
 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
 		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
 		device->peer_max_bio_size = peer;
 	}
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 err:
 	drbd_md_put_buffer(device);
@@ -3577,13 +3577,13 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
 	device->bm_io_work.why = why;
 	device->bm_io_work.flags = flags;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	set_bit(BITMAP_IO, &device->flags);
 	if (atomic_read(&device->ap_bio_cnt) == 0) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
 			drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
 	}
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 }
 
 /**
@@ -3751,10 +3751,10 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
 	/* Indicate to wake up device->misc_wait on progress. */
 	i->waiting = true;
 	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	timeout = schedule_timeout(timeout);
 	finish_wait(&device->misc_wait, &wait);
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (!timeout || device->state.conn < C_CONNECTED)
 		return -ETIMEDOUT;
 	if (signal_pending(current))
drivers/block/drbd/drbd_nl.c

@@ -443,9 +443,9 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
 		return false;
 	}
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	connect_cnt = connection->connect_cnt;
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 
 	fp = highest_fencing_policy(connection);
 	switch (fp) {
@@ -510,7 +510,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
 	   conn_request_state(connection, mask, val, CS_VERBOSE);
 	   here, because we might were able to re-establish the connection in the
 	   meantime. */
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
 		if (connection->connect_cnt != connect_cnt)
 			/* In case the connection was established and droped
@@ -519,7 +519,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
 		else
 			_conn_request_state(connection, mask, val, CS_VERBOSE);
 	}
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 
 	return conn_highest_pdsk(connection) <= D_OUTDATED;
 }
@@ -654,11 +654,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 			put_ldev(device);
 		}
 	} else {
-		mutex_lock(&first_peer_device(device)->connection->conf_update);
+		mutex_lock(&device->resource->conf_update);
 		nc = first_peer_device(device)->connection->net_conf;
 		if (nc)
 			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
-		mutex_unlock(&first_peer_device(device)->connection->conf_update);
+		mutex_unlock(&device->resource->conf_update);
 
 		set_disk_ro(device->vdisk, false);
 		if (get_ldev(device)) {
@@ -1188,10 +1188,10 @@ static void conn_reconfig_start(struct drbd_connection *connection)
 static void conn_reconfig_done(struct drbd_connection *connection)
 {
 	bool stop_threads;
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	stop_threads = conn_all_vols_unconf(connection) &&
 		connection->cstate == C_STANDALONE;
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 	if (stop_threads) {
 		/* asender is implicitly stopped by receiver
 		 * in conn_disconnect() */
@@ -1211,10 +1211,10 @@ static void drbd_suspend_al(struct drbd_device *device)
 	}
 
 	drbd_al_shrink(device);
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (device->state.conn < C_CONNECTED)
 		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	lc_unlock(device->act_log);
 
 	if (s)
@@ -1285,7 +1285,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 		goto fail;
 	}
 
-	mutex_lock(&first_peer_device(device)->connection->conf_update);
+	mutex_lock(&device->resource->conf_update);
 	old_disk_conf = device->ldev->disk_conf;
 	*new_disk_conf = *old_disk_conf;
 	if (should_set_defaults(info))
@@ -1348,7 +1348,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 		rcu_assign_pointer(device->rs_plan_s, new_plan);
 	}
 
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&device->resource->conf_update);
 
 	if (new_disk_conf->al_updates)
 		device->ldev->md.flags &= ~MDF_AL_DISABLED;
@@ -1374,7 +1374,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 	goto success;
 
 fail_unlock:
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&device->resource->conf_update);
 fail:
 	kfree(new_disk_conf);
 	kfree(new_plan);
@@ -1724,7 +1724,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
 		drbd_suspend_al(device); /* IO is still suspended here... */
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	os = drbd_read_state(device);
 	ns = os;
 	/* If MDF_CONSISTENT is not set go into inconsistent state,
@@ -1776,7 +1776,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (rv < SS_SUCCESS)
 		goto force_diskless_dec;
@@ -2077,7 +2077,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 	conn_reconfig_start(connection);
 
 	mutex_lock(&connection->data.mutex);
-	mutex_lock(&connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	old_net_conf = connection->net_conf;
 
 	if (!old_net_conf) {
@@ -2141,7 +2141,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 	crypto_free_hash(connection->cram_hmac_tfm);
 	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
 
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	mutex_unlock(&connection->data.mutex);
 	synchronize_rcu();
 	kfree(old_net_conf);
@@ -2152,7 +2152,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 	goto done;
 
 fail:
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	mutex_unlock(&connection->data.mutex);
 	free_crypto(&crypto);
 	kfree(new_net_conf);
@@ -2243,11 +2243,11 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 
 	conn_flush_workqueue(connection);
 
-	mutex_lock(&connection->conf_update);
+	mutex_lock(&adm_ctx.resource->conf_update);
 	old_net_conf = connection->net_conf;
 	if (old_net_conf) {
 		retcode = ERR_NET_CONFIGURED;
-		mutex_unlock(&connection->conf_update);
+		mutex_unlock(&adm_ctx.resource->conf_update);
 		goto fail;
 	}
 	rcu_assign_pointer(connection->net_conf, new_net_conf);
@@ -2263,7 +2263,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 	connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
 	memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
 
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&adm_ctx.resource->conf_update);
 
 	rcu_read_lock();
 	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
@@ -2486,12 +2486,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 	device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
 
 	if (new_disk_conf) {
-		mutex_lock(&first_peer_device(device)->connection->conf_update);
+		mutex_lock(&device->resource->conf_update);
 		old_disk_conf = device->ldev->disk_conf;
 		*new_disk_conf = *old_disk_conf;
 		new_disk_conf->disk_size = (sector_t)rs.resize_size;
 		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
-		mutex_unlock(&first_peer_device(device)->connection->conf_update);
+		mutex_unlock(&device->resource->conf_update);
 		synchronize_rcu();
 		kfree(old_disk_conf);
 	}
@@ -3248,10 +3248,10 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
 			drbd_send_uuids_skip_initial_sync(device);
 			_drbd_uuid_set(device, UI_BITMAP, 0);
 			drbd_print_uuids(device, "cleared bitmap UUID");
-			spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+			spin_lock_irq(&device->resource->req_lock);
 			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
 					CS_VERBOSE, NULL);
-			spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+			spin_unlock_irq(&device->resource->req_lock);
 		}
 	}
 
drivers/block/drbd/drbd_receiver.c

@@ -221,9 +221,9 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
 	LIST_HEAD(reclaimed);
 	struct drbd_peer_request *peer_req, *t;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	reclaim_finished_net_peer_reqs(device, &reclaimed);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
 		drbd_free_net_peer_req(device, peer_req);
@@ -288,7 +288,7 @@ struct page *drbd_alloc_pages(struct drbd_device *device, unsigned int number,
 }
 
 /* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
- * Is also used from inside an other spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+ * Is also used from inside an other spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
 static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
@@ -396,9 +396,9 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
 	int count = 0;
 	int is_net = list == &device->net_ee;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_splice_init(list, &work_list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
 		__drbd_free_peer_req(device, peer_req, is_net);
@@ -417,10 +417,10 @@ static int drbd_finish_peer_reqs(struct drbd_device *device)
 	struct drbd_peer_request *peer_req, *t;
 	int err = 0;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	reclaim_finished_net_peer_reqs(device, &reclaimed);
 	list_splice_init(&device->done_ee, &work_list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
 		drbd_free_net_peer_req(device, peer_req);
@@ -452,19 +452,19 @@ static void _drbd_wait_ee_list_empty(struct drbd_device *device,
 	 * and calling prepare_to_wait in the fast path */
 	while (!list_empty(head)) {
 		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		io_schedule();
 		finish_wait(&device->ee_wait, &wait);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 	}
 }
 
 static void drbd_wait_ee_list_empty(struct drbd_device *device,
 				    struct list_head *head)
 {
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	_drbd_wait_ee_list_empty(device, head);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 }
 
 static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
@@ -1072,13 +1072,13 @@ randomize:
 
 	drbd_thread_start(&connection->asender);
 
-	mutex_lock(&connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	/* The discard_my_data flag is a single-shot modifier to the next
 	 * connection attempt, the handshake of which is now well underway.
 	 * No need for rcu style copying of the whole struct
 	 * just to clear a single value. */
 	connection->net_conf->discard_my_data = 0;
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 
 	return h;
 
@@ -1692,9 +1692,9 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
 
 	peer_req->w.cb = e_end_resync_block;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_add(&peer_req->w.list, &device->sync_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	atomic_add(data_size >> 9, &device->rs_sect_ev);
 	if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
@@ -1702,9 +1702,9 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
 
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_del(&peer_req->w.list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	drbd_free_peer_req(device, peer_req);
 fail:
@@ -1743,9 +1743,9 @@ static int receive_DataReply(struct drbd_connection *connection, struct packet_i
 
 	sector = be64_to_cpu(p->sector);
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	if (unlikely(!req))
 		return -EIO;
 
@@ -1844,12 +1844,12 @@ static int e_end_block(struct drbd_work *w, int cancel)
 	/* we delete from the conflict detection hash _after_ we sent out the
 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
 	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
 		drbd_remove_epoch_entry_interval(device, peer_req);
 		if (peer_req->flags & EE_RESTART_REQUESTS)
 			restart_conflicting_writes(device, sector, peer_req->i.size);
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 	} else
 		D_ASSERT(device, drbd_interval_empty(&peer_req->i));
 
@@ -1925,7 +1925,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
 	struct drbd_peer_request *rs_req;
 	bool rv = 0;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_for_each_entry(rs_req, &device->sync_ee, w.list) {
 		if (overlaps(peer_req->i.sector, peer_req->i.size,
 			     rs_req->i.sector, rs_req->i.size)) {
@@ -1933,7 +1933,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
 			break;
 		}
 	}
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	return rv;
 }
@@ -2034,10 +2034,10 @@ static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
 			continue;
 		req->rq_state &= ~RQ_POSTPONED;
 		__req_mod(req, NEG_ACKED, &m);
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		if (m.bio)
 			complete_master_bio(device, &m);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		goto repeat;
 	}
 }
@@ -2218,10 +2218,10 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 		err = wait_for_and_update_peer_seq(device, peer_seq);
 		if (err)
 			goto out_interrupted;
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		err = handle_write_conflicts(device, peer_req);
 		if (err) {
-			spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+			spin_unlock_irq(&device->resource->req_lock);
 			if (err == -ENOENT) {
 				put_ldev(device);
 				return 0;
@@ -2230,10 +2230,10 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 		}
 	} else {
 		update_peer_seq(device, peer_seq);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 	}
 	list_add(&peer_req->w.list, &device->active_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (device->state.conn == C_SYNC_TARGET)
 		wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
@@ -2278,10 +2278,10 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_del(&peer_req->w.list);
 	drbd_remove_epoch_entry_interval(device, peer_req);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
 		drbd_al_complete_io(device, &peer_req->i);
 
@@ -2532,18 +2532,18 @@ submit_for_resync:
 
 submit:
 	inc_unacked(device);
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_add_tail(&peer_req->w.list, &device->read_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
 		return 0;
 
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_del(&peer_req->w.list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
 out_free_e:
@@ -3221,7 +3221,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
 	}
 
 	mutex_lock(&connection->data.mutex);
-	mutex_lock(&connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	old_net_conf = connection->net_conf;
 	*new_net_conf = *old_net_conf;
 
@@ -3232,7 +3232,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
 	new_net_conf->two_primaries = p_two_primaries;
 
 	rcu_assign_pointer(connection->net_conf, new_net_conf);
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	mutex_unlock(&connection->data.mutex);
 
 	crypto_free_hash(connection->peer_integrity_tfm);
@@ -3372,13 +3372,13 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
 	if (err)
 		return err;
 
-	mutex_lock(&first_peer_device(device)->connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	old_net_conf = first_peer_device(device)->connection->net_conf;
 	if (get_ldev(device)) {
 		new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
 		if (!new_disk_conf) {
 			put_ldev(device);
-			mutex_unlock(&first_peer_device(device)->connection->conf_update);
+			mutex_unlock(&connection->resource->conf_update);
 			drbd_err(device, "Allocation of new disk_conf failed\n");
 			return -ENOMEM;
 		}
@@ -3498,7 +3498,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
 		rcu_assign_pointer(device->rs_plan_s, new_plan);
 	}
 
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	synchronize_rcu();
 	if (new_net_conf)
 		kfree(old_net_conf);
@@ -3512,7 +3512,7 @@ reconnect:
 		put_ldev(device);
 		kfree(new_disk_conf);
 	}
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	return -EIO;
 
 disconnect:
@@ -3521,7 +3521,7 @@ disconnect:
 		put_ldev(device);
 		kfree(new_disk_conf);
 	}
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	/* just for completeness: actually not needed,
 	 * as this is not reached if csums_tfm was ok. */
 	crypto_free_hash(csums_tfm);
@@ -3601,13 +3601,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
 			return -ENOMEM;
 		}
 
-		mutex_lock(&first_peer_device(device)->connection->conf_update);
+		mutex_lock(&connection->resource->conf_update);
 		old_disk_conf = device->ldev->disk_conf;
 		*new_disk_conf = *old_disk_conf;
 		new_disk_conf->disk_size = p_usize;
 
 		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
-		mutex_unlock(&first_peer_device(device)->connection->conf_update);
+		mutex_unlock(&connection->resource->conf_update);
 		synchronize_rcu();
 		kfree(old_disk_conf);
 
@@ -3846,10 +3846,10 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 		drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
 	}
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 retry:
 	os = ns = drbd_read_state(device);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	/* If some other part of the code (asender thread, timeout)
 	 * already decided to close the connection again,
@@ -3952,7 +3952,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 		}
 	}
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (os.i != drbd_read_state(device).i)
 		goto retry;
 	clear_bit(CONSIDER_RESYNC, &device->flags);
@@ -3966,7 +3966,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 	    test_bit(NEW_CUR_UUID, &device->flags)) {
 		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
 		   for temporal network outages! */
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
 		tl_clear(first_peer_device(device)->connection);
 		drbd_uuid_new_current(device);
@@ -3976,7 +3976,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 	}
 	rv = _drbd_set_state(device, ns, cs_flags, NULL);
 	ns = drbd_read_state(device);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (rv < SS_SUCCESS) {
 		conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -4483,12 +4483,12 @@ static void conn_disconnect(struct drbd_connection *connection)
 	if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
 		conn_try_outdate_peer_async(connection);
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	oc = connection->cstate;
 	if (oc >= C_UNCONNECTED)
 		_conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
 
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 
 	if (oc == C_DISCONNECTING)
 		conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
@@ -4499,11 +4499,11 @@ static int drbd_disconnected(struct drbd_device *device)
 	unsigned int i;
 
 	/* wait for current activity to cease. */
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	_drbd_wait_ee_list_empty(device, &device->active_ee);
 	_drbd_wait_ee_list_empty(device, &device->sync_ee);
 	_drbd_wait_ee_list_empty(device, &device->read_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	/* We do not have data structures that would allow us to
 	 * get the rs_pending_cnt down to 0 again.
@@ -4970,14 +4970,14 @@ validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t secto
 	struct drbd_request *req;
 	struct bio_and_error m;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	req = find_request(device, root, id, sector, missing_ok, func);
 	if (unlikely(!req)) {
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		return -EIO;
 	}
 	__req_mod(req, what, &m);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (m.bio)
 		complete_master_bio(device, &m);
@@ -5218,14 +5218,14 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
 		}
 		set_bit(SIGNAL_ASENDER, &connection->flags);
 
-		spin_lock_irq(&connection->req_lock);
+		spin_lock_irq(&connection->resource->req_lock);
 		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 			struct drbd_device *device = peer_device->device;
 			not_empty = !list_empty(&device->done_ee);
 			if (not_empty)
 				break;
 		}
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&connection->resource->req_lock);
 		rcu_read_unlock();
 	} while (not_empty);
 
drivers/block/drbd/drbd_req.c

@@ -851,9 +851,9 @@ static void complete_conflicting_writes(struct drbd_request *req)
 			break;
 		/* Indicate to wake up device->misc_wait on progress. */
 		i->waiting = true;
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		schedule();
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 	}
 	finish_wait(&device->misc_wait, &wait);
 }
@@ -1078,7 +1078,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 	struct bio_and_error m = { NULL, };
 	bool no_remote = false;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (rw == WRITE) {
 		/* This may temporarily give up the req_lock,
 		 * but will re-aquire it before it returns here.
@@ -1140,9 +1140,9 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 		/* needs to be marked within the same spinlock */
 		_req_mod(req, TO_BE_SUBMITTED);
 		/* but we need to give up the spinlock to submit */
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		drbd_submit_req_private_bio(req);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 	} else if (no_remote) {
 nodata:
 		if (__ratelimit(&drbd_ratelimit_state))
@@ -1155,7 +1155,7 @@ nodata:
 out:
 	if (drbd_req_put_completion_ref(req, &m, 1))
 		kref_put(&req->kref, drbd_req_destroy);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (m.bio)
 		complete_master_bio(device, &m);
@@ -1360,10 +1360,10 @@ void request_timer_fn(unsigned long data)
 
 	now = jiffies;
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	req = find_oldest_request(connection);
 	if (!req) {
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		mod_timer(&device->request_timer, now + et);
 		return;
 	}
@@ -1397,6 +1397,6 @@ void request_timer_fn(unsigned long data)
 		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
 	}
 	nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 	mod_timer(&device->request_timer, nt);
 }
@@ -318,9 +318,9 @@ static inline int req_mod(struct drbd_request *req,
 	struct bio_and_error m;
 	int rv;

-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	rv = __req_mod(req, what, &m);
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);

 	if (m.bio)
 		complete_master_bio(device, &m);
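req_mod() records any resulting master-bio completion in a local struct bio_and_error while holding the lock and only calls complete_master_bio() after releasing it, since the completion path must not run under the spinlock. A hedged user-space model of that collect-then-complete shape (every name below is invented):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*completion_fn)(int status);

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static completion_fn pending_completion;	/* guarded by req_lock */
static int pending_status;			/* guarded by req_lock */

static void master_done(int status)
{
	/* In DRBD this is complete_master_bio(); here it just reports.
	 * It may do arbitrary work, so it must run unlocked. */
	printf("completed with status %d\n", status);
}

static void req_mod_model(void)
{
	completion_fn fn;
	int status;

	pthread_mutex_lock(&req_lock);
	/* __req_mod() analogue: the state transition decides, under the
	 * lock, whether the master request is now complete; we only
	 * collect that decision into locals here. */
	fn = pending_completion;
	status = pending_status;
	pending_completion = NULL;
	pthread_mutex_unlock(&req_lock);

	if (fn)		/* like "if (m.bio) complete_master_bio(...)" */
		fn(status);
}

int main(void)
{
	pending_completion = master_done;
	pending_status = 0;
	req_mod_model();
	return 0;
}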
@@ -250,10 +250,10 @@ drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
 	union drbd_state ns;
 	enum drbd_state_rv rv;

-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	ns = apply_mask_val(drbd_read_state(device), mask, val);
 	rv = _drbd_set_state(device, ns, f, NULL);
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);

 	return rv;
 }
@@ -284,7 +284,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
 	if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
 		return SS_CW_FAILED_BY_PEER;

-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	os = drbd_read_state(device);
 	ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
 	rv = is_valid_transition(os, ns);
@@ -301,7 +301,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
 			rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
 		}
 	}
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);

 	return rv;
 }
@@ -330,12 +330,12 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
 	if (f & CS_SERIALIZE)
 		mutex_lock(device->state_mutex);

-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	os = drbd_read_state(device);
 	ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
 	rv = is_valid_transition(os, ns);
 	if (rv < SS_SUCCESS) {
-		spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+		spin_unlock_irqrestore(&device->resource->req_lock, flags);
 		goto abort;
 	}

@@ -343,7 +343,7 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
 		rv = is_valid_state(device, ns);
 		if (rv == SS_SUCCESS)
 			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
-		spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+		spin_unlock_irqrestore(&device->resource->req_lock, flags);

 		if (rv < SS_SUCCESS) {
 			if (f & CS_VERBOSE)
@@ -366,14 +366,14 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
 				print_st_err(device, os, ns, rv);
 			goto abort;
 		}
-		spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+		spin_lock_irqsave(&device->resource->req_lock, flags);
 		ns = apply_mask_val(drbd_read_state(device), mask, val);
 		rv = _drbd_set_state(device, ns, f, &done);
 	} else {
 		rv = _drbd_set_state(device, ns, f, &done);
 	}

-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);

 	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
 		D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
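Worth noting in drbd_req_state() above: the transition is validated under req_lock, the lock is dropped for the slow cluster-wide part, and after re-locking the new state is recomputed from a fresh drbd_read_state() rather than reusing the pre-drop ns, which may be stale by then. A small model of that re-read-after-relock rule (illustrative names, not DRBD's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;	/* stands in for the device state word */

static int read_state_locked(void)
{
	return shared_state;	/* caller holds req_lock */
}

static void request_state_change(int mask, int val)
{
	int os, ns;

	pthread_mutex_lock(&req_lock);
	os = read_state_locked();
	ns = (os & ~mask) | (val & mask);	/* ~ apply_mask_val() */
	pthread_mutex_unlock(&req_lock);

	/* slow path: ask the peer, sleep, etc.; state may move meanwhile,
	 * and ns computed above is only used for validation */

	pthread_mutex_lock(&req_lock);
	/* recompute from the *current* state before committing */
	ns = (read_state_locked() & ~mask) | (val & mask);
	shared_state = ns;
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	shared_state = 0x10;
	request_state_change(0x0f, 0x03);
	printf("state = %#x\n", shared_state);
	return 0;
}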
@@ -1245,7 +1245,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 		struct drbd_connection *connection = first_peer_device(device)->connection;
 		enum drbd_req_event what = NOTHING;

-		spin_lock_irq(&connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
 			what = RESEND;

@@ -1260,13 +1260,13 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 					(union drbd_state) { { .susp_nod = 0 } },
 					CS_VERBOSE);
 		}
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 	}

 	if (ns.susp_fen) {
 		struct drbd_connection *connection = first_peer_device(device)->connection;

-		spin_lock_irq(&connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
 			/* case2: The connection was established again: */
 			struct drbd_peer_device *peer_device;
@@ -1282,7 +1282,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 					(union drbd_state) { { .susp_fen = 0 } },
 					CS_VERBOSE);
 		}
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 	}

 	/* Became sync source. With protocol >= 96, we still need to send out
@@ -1555,13 +1555,13 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
 	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
 		struct net_conf *old_conf;

-		mutex_lock(&connection->conf_update);
+		mutex_lock(&connection->resource->conf_update);
 		old_conf = connection->net_conf;
 		connection->my_addr_len = 0;
 		connection->peer_addr_len = 0;
 		rcu_assign_pointer(connection->net_conf, NULL);
 		conn_free_crypto(connection);
-		mutex_unlock(&connection->conf_update);
+		mutex_unlock(&connection->resource->conf_update);

 		synchronize_rcu();
 		kfree(old_conf);
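This block is the classic RCU retire sequence, now serialized by the resource-wide conf_update mutex: unpublish the pointer under the update mutex, wait out a grace period, then free the old copy. A rough user-space model of the same ordering, with an explicit reader count standing in for synchronize_rcu() and all names invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct net_conf_model { int timeout; };

static pthread_mutex_t conf_update = PTHREAD_MUTEX_INITIALIZER;	/* serializes updaters */
static _Atomic(struct net_conf_model *) net_conf;		/* published pointer   */
static atomic_int readers;					/* crude grace period  */

static void reader(void)
{
	atomic_fetch_add(&readers, 1);		/* ~ rcu_read_lock()   */
	struct net_conf_model *nc = atomic_load(&net_conf);
	if (nc)
		printf("timeout = %d\n", nc->timeout);
	atomic_fetch_sub(&readers, 1);		/* ~ rcu_read_unlock() */
}

static void teardown(void)
{
	pthread_mutex_lock(&conf_update);
	struct net_conf_model *old_conf =
		atomic_exchange(&net_conf, NULL);	/* ~ rcu_assign_pointer(.., NULL) */
	pthread_mutex_unlock(&conf_update);

	while (atomic_load(&readers) > 0)	/* ~ synchronize_rcu(): wait until   */
		;				/*   no reader can still see old_conf */
	free(old_conf);				/* ~ kfree(old_conf) */
}

int main(void)
{
	struct net_conf_model *nc = malloc(sizeof(*nc));
	nc->timeout = 60;
	atomic_store(&net_conf, nc);
	reader();
	teardown();
	return 0;
}

The real code gets the grace period from the kernel's RCU; the spin-on-a-counter here is only a stand-in to make the ordering visible.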
@@ -1579,13 +1579,13 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
 			}
 		}
 		rcu_read_unlock();
-		spin_lock_irq(&connection->req_lock);
+		spin_lock_irq(&connection->resource->req_lock);
 		_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
 		_conn_request_state(connection,
 				    (union drbd_state) { { .susp_fen = 1 } },
 				    (union drbd_state) { { .susp_fen = 0 } },
 				    CS_VERBOSE);
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&connection->resource->req_lock);
 	}
 }
 	kref_put(&connection->kref, drbd_destroy_connection);
@@ -1802,7 +1802,7 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
 		/* This will be a cluster-wide state change.
 		 * Need to give up the spinlock, grab the mutex,
 		 * then send the state change request, ... */
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&connection->resource->req_lock);
 		mutex_lock(&connection->cstate_mutex);
 		have_mutex = true;

@@ -1821,10 +1821,10 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
 		/* ... and re-aquire the spinlock.
 		 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
 		 * conn_set_state() within the same spinlock. */
-		spin_lock_irq(&connection->req_lock);
+		spin_lock_irq(&connection->resource->req_lock);
 		wait_event_lock_irq(connection->ping_wait,
 				    (rv = _conn_rq_cond(connection, mask, val)),
-				    connection->req_lock);
+				    connection->resource->req_lock);
 		clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
 		if (rv < SS_SUCCESS)
 			goto abort;
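The comments above spell out the constraint: cstate_mutex can sleep, so it must never be taken while holding the spinlock, and wait_event_lock_irq() needs the lock expression itself as an argument because it drops and re-takes the lock around sleeping, which is why that argument changes in this patch too. A toy pthread model of the drop-spinlock-take-mutex dance (invented names, not DRBD's):

#include <pthread.h>
#include <stdio.h>

/* Invented stand-ins: a "fast" lock that must never be held across anything
 * that can block, and a "slow" mutex that can sleep. */
static pthread_spinlock_t req_lock;
static pthread_mutex_t cstate_mutex = PTHREAD_MUTEX_INITIALIZER;
static int cluster_state;

static void cluster_wide_change(void)
{
	pthread_spin_lock(&req_lock);
	/* decide under the spinlock that a cluster-wide change is needed */
	pthread_spin_unlock(&req_lock);		/* give up the spinlock ...      */
	pthread_mutex_lock(&cstate_mutex);	/* ... before grabbing the mutex */

	/* send the state change request; this part may block on the network */

	pthread_spin_lock(&req_lock);		/* re-acquire to commit the result */
	cluster_state = 1;
	pthread_spin_unlock(&req_lock);

	pthread_mutex_unlock(&cstate_mutex);
}

int main(void)
{
	pthread_spin_init(&req_lock, PTHREAD_PROCESS_PRIVATE);
	cluster_wide_change();
	printf("state: %d\n", cluster_state);
	return 0;
}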
@@ -1853,10 +1853,10 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
 	if (have_mutex) {
 		/* mutex_unlock() "... must not be used in interrupt context.",
 		 * so give up the spinlock, then re-aquire it */
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&connection->resource->req_lock);
 abort_unlocked:
 		mutex_unlock(&connection->cstate_mutex);
-		spin_lock_irq(&connection->req_lock);
+		spin_lock_irq(&connection->resource->req_lock);
 	}
 	if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
 		drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
@@ -1872,9 +1872,9 @@ conn_request_state(struct drbd_connection *connection, union drbd_state mask, un
 {
 	enum drbd_state_rv rv;

-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	rv = _conn_request_state(connection, mask, val, flags);
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);

 	return rv;
 }
@@ -102,14 +102,14 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
 	unsigned long flags = 0;
 	struct drbd_device *device = peer_req->w.device;

-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	device->read_cnt += peer_req->i.size >> 9;
 	list_del(&peer_req->w.list);
 	if (list_empty(&device->read_ee))
 		wake_up(&device->ee_wait);
 	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);

 	drbd_queue_work(&first_peer_device(device)->connection->sender_work, &peer_req->w);
 	put_ldev(device);
@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
 	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
 	block_id = peer_req->block_id;

-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	device->writ_cnt += peer_req->i.size >> 9;
 	list_move_tail(&peer_req->w.list, &device->done_ee);

@@ -150,7 +150,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel

 	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
 		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);

 	if (block_id == ID_SYNCER)
 		drbd_rs_complete_io(device, i.sector);
@@ -273,9 +273,9 @@ void drbd_request_endio(struct bio *bio, int error)
 	req->private_bio = ERR_PTR(error);

 	/* not req_mod(), we need irqsave here! */
-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	__req_mod(req, what, &m);
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);
 	put_ldev(device);

 	if (m.bio)
@@ -397,9 +397,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
 		goto defer;

 	peer_req->w.cb = w_e_send_csum;
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_add(&peer_req->w.list, &device->read_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);

 	atomic_add(size >> 9, &device->rs_sect_ev);
 	if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
@@ -409,9 +409,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
 	 * because bio_add_page failed (probably broken lower level driver),
 	 * retry may or may not help.
 	 * If it does not, you may need to force disconnect. */
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_del(&peer_req->w.list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);

 	drbd_free_peer_req(device, peer_req);
 defer:
@@ -855,7 +855,7 @@ int drbd_resync_finished(struct drbd_device *device)

 	ping_peer(device);

-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	os = drbd_read_state(device);

 	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -944,7 +944,7 @@ int drbd_resync_finished(struct drbd_device *device)

 	_drbd_set_state(device, ns, CS_VERBOSE, NULL);
 out_unlock:
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	put_ldev(device);
 out:
 	device->rs_total = 0;
@@ -971,9 +971,9 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
 		int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
 		atomic_add(i, &device->pp_in_use_by_net);
 		atomic_sub(i, &device->pp_in_use);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		list_add_tail(&peer_req->w.list, &device->net_ee);
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		wake_up(&drbd_pp_wait);
 	} else
 		drbd_free_peer_req(device, peer_req);
@@ -1847,7 +1847,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
 	for (;;) {
 		int send_barrier;
 		prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
-		spin_lock_irq(&connection->req_lock);
+		spin_lock_irq(&connection->resource->req_lock);
 		spin_lock(&connection->sender_work.q_lock);	/* FIXME get rid of this one? */
 		/* dequeue single item only,
 		 * we still use drbd_queue_work_front() in some places */
@@ -1855,11 +1855,11 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
 			list_move(connection->sender_work.q.next, work_list);
 		spin_unlock(&connection->sender_work.q_lock);	/* FIXME get rid of this one? */
 		if (!list_empty(work_list) || signal_pending(current)) {
-			spin_unlock_irq(&connection->req_lock);
+			spin_unlock_irq(&connection->resource->req_lock);
 			break;
 		}
 		send_barrier = need_to_send_barrier(connection);
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&connection->resource->req_lock);
 		if (send_barrier) {
 			drbd_send_barrier(connection);
 			connection->send.current_epoch_nr++;

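wait_for_work() nests the sender queue's q_lock inside the resource-wide req_lock (the FIXME hints the inner lock may become redundant once everything serializes on the resource). Nesting is only safe if every path takes the two locks in the same order; a toy model of that discipline, with invented names:

#include <pthread.h>
#include <stdio.h>

/* Toy model: outer resource-wide lock, inner per-queue lock.
 * Every thread must take them in this order (outer, then inner);
 * two threads taking them in opposite orders can deadlock. */
static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */
static int queued;

static void *dequeue_one(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&req_lock);	/* outer first */
	pthread_mutex_lock(&q_lock);	/* then inner  */
	if (queued > 0)
		queued--;		/* "dequeue single item only" */
	pthread_mutex_unlock(&q_lock);
	pthread_mutex_unlock(&req_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	queued = 1;
	pthread_create(&t, NULL, dequeue_one, NULL);
	pthread_join(t, NULL);
	printf("queued = %d\n", queued);
	return 0;
}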