mirror of https://github.com/openzfs/zfs.git
Add a "try" operation for range locks
zfs_rangelock_tryenter() bails immediately instead of waiting for the lock to become available. This will be used to resolve a deadlock in the FreeBSD page-in code. No functional change intended.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Signed-off-by: Mark Johnston <markj@FreeBSD.org>
Closes #10519
commit 6e00561712
parent a4b0a74c7f
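zfs_rangelock_tryenter() has the same signature as zfs_rangelock_enter() but returns NULL instead of sleeping when the requested range is contended, so a caller that cannot safely block (such as the FreeBSD page-in path this is intended for) can back off and retry. A rough caller-side sketch, assuming a znode zp with its z_rangelock; the surrounding function, the RL_READER choice, and the EAGAIN back-off are illustrative and not taken from this commit:

    zfs_locked_range_t *lr;

    /* Try to lock [off, off + len) without sleeping. */
    lr = zfs_rangelock_tryenter(&zp->z_rangelock, off, len, RL_READER);
    if (lr == NULL)
            return (SET_ERROR(EAGAIN));     /* contended: let the caller retry */

    /* ... fault the pages in while the range is held ... */

    zfs_rangelock_exit(lr);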
diff --git a/include/sys/zfs_rangelock.h b/include/sys/zfs_rangelock.h
@@ -71,6 +71,8 @@ void zfs_rangelock_fini(zfs_rangelock_t *);
 
 zfs_locked_range_t *zfs_rangelock_enter(zfs_rangelock_t *,
     uint64_t, uint64_t, zfs_rangelock_type_t);
+zfs_locked_range_t *zfs_rangelock_tryenter(zfs_rangelock_t *,
+    uint64_t, uint64_t, zfs_rangelock_type_t);
 void zfs_rangelock_exit(zfs_locked_range_t *);
 void zfs_rangelock_reduce(zfs_locked_range_t *, uint64_t, uint64_t);
 
diff --git a/module/zfs/zfs_rangelock.c b/module/zfs/zfs_rangelock.c
@@ -150,10 +150,12 @@ zfs_rangelock_fini(zfs_rangelock_t *rl)
 }
 
 /*
- * Check if a write lock can be grabbed, or wait and recheck until available.
+ * Check if a write lock can be grabbed. If not, fail immediately or sleep and
+ * recheck until available, depending on the value of the "nonblock" parameter.
  */
-static void
-zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new)
+static boolean_t
+zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new,
+    boolean_t nonblock)
 {
         avl_tree_t *tree = &rl->rl_tree;
         zfs_locked_range_t *lr;
@@ -183,7 +185,7 @@ zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new)
         */
        if (avl_numnodes(tree) == 0) {
                avl_add(tree, new);
-               return;
+               return (B_TRUE);
        }
 
        /*
@@ -204,8 +206,10 @@ zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new)
                goto wait;
 
        avl_insert(tree, new, where);
-       return;
+       return (B_TRUE);
 wait:
+       if (nonblock)
+               return (B_FALSE);
        if (!lr->lr_write_wanted) {
                cv_init(&lr->lr_write_cv, NULL, CV_DEFAULT, NULL);
                lr->lr_write_wanted = B_TRUE;
@@ -391,10 +395,12 @@ zfs_rangelock_add_reader(avl_tree_t *tree, zfs_locked_range_t *new,
 }
 
 /*
- * Check if a reader lock can be grabbed, or wait and recheck until available.
+ * Check if a reader lock can be grabbed. If not, fail immediately or sleep and
+ * recheck until available, depending on the value of the "nonblock" parameter.
  */
-static void
-zfs_rangelock_enter_reader(zfs_rangelock_t *rl, zfs_locked_range_t *new)
+static boolean_t
+zfs_rangelock_enter_reader(zfs_rangelock_t *rl, zfs_locked_range_t *new,
+    boolean_t nonblock)
 {
        avl_tree_t *tree = &rl->rl_tree;
        zfs_locked_range_t *prev, *next;
@@ -415,6 +421,8 @@ retry:
         */
        if (prev && (off < prev->lr_offset + prev->lr_length)) {
                if ((prev->lr_type == RL_WRITER) || (prev->lr_write_wanted)) {
+                       if (nonblock)
+                               return (B_FALSE);
                        if (!prev->lr_read_wanted) {
                                cv_init(&prev->lr_read_cv,
                                    NULL, CV_DEFAULT, NULL);
@@ -439,6 +447,8 @@ retry:
                if (off + len <= next->lr_offset)
                        goto got_lock;
                if ((next->lr_type == RL_WRITER) || (next->lr_write_wanted)) {
+                       if (nonblock)
+                               return (B_FALSE);
                        if (!next->lr_read_wanted) {
                                cv_init(&next->lr_read_cv,
                                    NULL, CV_DEFAULT, NULL);
@@ -457,6 +467,7 @@ got_lock:
         * locks and bumping ref counts (r_count).
         */
        zfs_rangelock_add_reader(tree, new, prev, where);
+       return (B_TRUE);
 }
 
 /*
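Both helpers above change from returning void to returning boolean_t and gain a nonblock argument: at every point where the old code would sleep on a condition variable, the new code first returns B_FALSE if nonblock is set. A minimal sketch of that pattern in isolation, using invented names (this is not the literal OpenZFS code):

    /* Acquire a resource guarded by "busy", or bail out if nonblock is set. */
    static boolean_t
    acquire_or_bail(kmutex_t *mtx, kcondvar_t *cv, boolean_t *busy,
        boolean_t nonblock)
    {
            ASSERT(MUTEX_HELD(mtx));
            while (*busy) {
                    if (nonblock)
                            return (B_FALSE);       /* fail fast, nothing sleeps */
                    cv_wait(cv, mtx);               /* sleep, then recheck */
            }
            *busy = B_TRUE;
            return (B_TRUE);
    }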
@@ -464,11 +475,12 @@ got_lock:
  * (RL_WRITER or RL_APPEND). If RL_APPEND is specified, rl_cb() will convert
  * it to a RL_WRITER lock (with the offset at the end of the file). Returns
  * the range lock structure for later unlocking (or reduce range if the
- * entire file is locked as RL_WRITER).
+ * entire file is locked as RL_WRITER), or NULL if nonblock is true and the
+ * lock could not be acquired immediately.
  */
-zfs_locked_range_t *
-zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
-    zfs_rangelock_type_t type)
+static zfs_locked_range_t *
+zfs_rangelock_enter_impl(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
+    zfs_rangelock_type_t type, boolean_t nonblock)
 {
        zfs_locked_range_t *new;
 
@@ -491,18 +503,34 @@ zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
                /*
                 * First check for the usual case of no locks
                 */
-               if (avl_numnodes(&rl->rl_tree) == 0)
+               if (avl_numnodes(&rl->rl_tree) == 0) {
                        avl_add(&rl->rl_tree, new);
-               else
-                       zfs_rangelock_enter_reader(rl, new);
-       } else {
-               /* RL_WRITER or RL_APPEND */
-               zfs_rangelock_enter_writer(rl, new);
+               } else if (!zfs_rangelock_enter_reader(rl, new, nonblock)) {
+                       kmem_free(new, sizeof (*new));
+                       new = NULL;
+               }
+       } else if (!zfs_rangelock_enter_writer(rl, new, nonblock)) {
+               kmem_free(new, sizeof (*new));
+               new = NULL;
        }
        mutex_exit(&rl->rl_lock);
        return (new);
 }
 
+zfs_locked_range_t *
+zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
+    zfs_rangelock_type_t type)
+{
+       return (zfs_rangelock_enter_impl(rl, off, len, type, B_FALSE));
+}
+
+zfs_locked_range_t *
+zfs_rangelock_tryenter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
+    zfs_rangelock_type_t type)
+{
+       return (zfs_rangelock_enter_impl(rl, off, len, type, B_TRUE));
+}
+
 /*
  * Safely free the zfs_locked_range_t.
  */
|
@ -657,6 +685,7 @@ zfs_rangelock_reduce(zfs_locked_range_t *lr, uint64_t off, uint64_t len)
|
||||||
EXPORT_SYMBOL(zfs_rangelock_init);
|
EXPORT_SYMBOL(zfs_rangelock_init);
|
||||||
EXPORT_SYMBOL(zfs_rangelock_fini);
|
EXPORT_SYMBOL(zfs_rangelock_fini);
|
||||||
EXPORT_SYMBOL(zfs_rangelock_enter);
|
EXPORT_SYMBOL(zfs_rangelock_enter);
|
||||||
|
EXPORT_SYMBOL(zfs_rangelock_tryenter);
|
||||||
EXPORT_SYMBOL(zfs_rangelock_exit);
|
EXPORT_SYMBOL(zfs_rangelock_exit);
|
||||||
EXPORT_SYMBOL(zfs_rangelock_reduce);
|
EXPORT_SYMBOL(zfs_rangelock_reduce);
|
||||||
#endif
|
#endif
|
||||||
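To make the new contract concrete, here is a hedged sketch (assuming an already-initialized zfs_rangelock_t named rl inside some kernel or test function; not code from this commit) of how the two entry points behave while a conflicting writer lock is held:

    zfs_locked_range_t *wr, *overlap, *disjoint;

    /* Blocking acquisition of an exclusive lock over the first 4 KiB. */
    wr = zfs_rangelock_enter(&rl, 0, 4096, RL_WRITER);

    /* An overlapping try fails immediately and returns NULL... */
    overlap = zfs_rangelock_tryenter(&rl, 0, 4096, RL_READER);
    ASSERT3P(overlap, ==, NULL);

    /* ...while a non-overlapping range is still granted without sleeping. */
    disjoint = zfs_rangelock_tryenter(&rl, 8192, 4096, RL_READER);
    ASSERT3P(disjoint, !=, NULL);

    zfs_rangelock_exit(disjoint);
    zfs_rangelock_exit(wr);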