dlm: make plock operation killable
Allow processes blocked on plock requests to be interrupted when they
are killed.  This leaves the problem of cleaning up the lock state in
userspace.  This has three parts:

1. Add a flag to unlock operations sent to userspace indicating the
   file is being closed.  Userspace will then look for and clear any
   waiting plock operations that were abandoned by an interrupted
   process.

2. Queue an unlock-close operation (like in 1) to clean up userspace
   from an interrupted plock request.  This is needed because the vfs
   will not send a cleanup-unlock if it sees no locks on the file,
   which it won't if the interrupted operation was the only one.

3. Do not use replies from userspace for unlock-close operations
   because they are unnecessary (they are just cleaning up for the
   process which did not make an unlock call).  This also simplifies
   the new unlock-close generated from point 2.

Signed-off-by: David Teigland <teigland@redhat.com>
commit 901025d2f3
parent 2a7ce0edd6
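To make the new protocol concrete, here is a minimal sketch (hypothetical C, not taken from dlm_controld or any real daemon) of how the userspace side could consume the DLM_PLOCK_FL_CLOSE flag it now receives from the dlm_plock misc device: an unlock op carrying the flag triggers cleanup of waiters abandoned by the killed process (part 1) and gets no reply written back, since the kernel already frees such ops in dev_read() (part 3); every other op is answered as before. cleanup_abandoned_plocks() is a made-up placeholder for the daemon's own plock bookkeeping.

#include <unistd.h>
#include <linux/dlm_plock.h>

/* placeholder for daemon-specific state handling: drop any queued plock
   waiters that belong to this owner on this resource */
static void cleanup_abandoned_plocks(unsigned long long number,
				     unsigned long long owner)
{
	(void)number;
	(void)owner;
}

/* one pass of the read/answer loop a plock daemon runs against the
   dlm_plock misc device (fd opened read/write on that device) */
static void handle_one_op(int fd)
{
	struct dlm_plock_info info;

	if (read(fd, &info, sizeof(info)) != (ssize_t)sizeof(info))
		return;

	if (info.optype == DLM_PLOCK_OP_UNLOCK &&
	    (info.flags & DLM_PLOCK_FL_CLOSE)) {
		/* part 1: the file is being closed or the waiter was
		   killed, so also clear abandoned waiters */
		cleanup_abandoned_plocks(info.number, info.owner);
		/* part 3: no reply is written back for unlock-close ops;
		   the kernel freed the op when it was read */
		return;
	}

	/* ordinary ops still get a reply so the kernel can wake the
	   waiting process; info.rv carries the result */
	info.rv = 0;
	if (write(fd, &info, sizeof(info)) != (ssize_t)sizeof(info))
		return;
}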
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -71,6 +71,36 @@ static void send_op(struct plock_op *op)
 	wake_up(&send_wq);
 }
 
+/* If a process was killed while waiting for the only plock on a file,
+   locks_remove_posix will not see any lock on the file so it won't
+   send an unlock-close to us to pass on to userspace to clean up the
+   abandoned waiter.  So, we have to insert the unlock-close when the
+   lock call is interrupted. */
+
+static void do_unlock_close(struct dlm_ls *ls, u64 number,
+			    struct file *file, struct file_lock *fl)
+{
+	struct plock_op *op;
+
+	op = kzalloc(sizeof(*op), GFP_NOFS);
+	if (!op)
+		return;
+
+	op->info.optype = DLM_PLOCK_OP_UNLOCK;
+	op->info.pid = fl->fl_pid;
+	op->info.fsid = ls->ls_global_id;
+	op->info.number = number;
+	op->info.start = 0;
+	op->info.end = OFFSET_MAX;
+	if (fl->fl_lmops && fl->fl_lmops->fl_grant)
+		op->info.owner = (__u64) fl->fl_pid;
+	else
+		op->info.owner = (__u64)(long) fl->fl_owner;
+
+	op->info.flags |= DLM_PLOCK_FL_CLOSE;
+	send_op(op);
+}
+
 int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 		   int cmd, struct file_lock *fl)
 {
@@ -114,9 +144,19 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 
 	send_op(op);
 
-	if (xop->callback == NULL)
-		wait_event(recv_wq, (op->done != 0));
-	else {
+	if (xop->callback == NULL) {
+		rv = wait_event_killable(recv_wq, (op->done != 0));
+		if (rv == -ERESTARTSYS) {
+			log_debug(ls, "dlm_posix_lock: wait killed %llx",
+				  (unsigned long long)number);
+			spin_lock(&ops_lock);
+			list_del(&op->list);
+			spin_unlock(&ops_lock);
+			kfree(xop);
+			do_unlock_close(ls, number, file, fl);
+			goto out;
+		}
+	} else {
 		rv = FILE_LOCK_DEFERRED;
 		goto out;
 	}
@@ -233,6 +273,13 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 	else
 		op->info.owner = (__u64)(long) fl->fl_owner;
 
+	if (fl->fl_flags & FL_CLOSE) {
+		op->info.flags |= DLM_PLOCK_FL_CLOSE;
+		send_op(op);
+		rv = 0;
+		goto out;
+	}
+
 	send_op(op);
 	wait_event(recv_wq, (op->done != 0));
 
@@ -334,7 +381,10 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
 	spin_lock(&ops_lock);
 	if (!list_empty(&send_list)) {
 		op = list_entry(send_list.next, struct plock_op, list);
-		list_move(&op->list, &recv_list);
+		if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+			list_del(&op->list);
+		else
+			list_move(&op->list, &recv_list);
 		memcpy(&info, &op->info, sizeof(info));
 	}
 	spin_unlock(&ops_lock);
@@ -342,6 +392,13 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
 	if (!op)
 		return -EAGAIN;
 
+	/* there is no need to get a reply from userspace for unlocks
+	   that were generated by the vfs cleaning up for a close
+	   (the process did not make an unlock call). */
+
+	if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+		kfree(op);
+
 	if (copy_to_user(u, &info, sizeof(info)))
 		return -EFAULT;
 	return sizeof(info);
--- a/include/linux/dlm_plock.h
+++ b/include/linux/dlm_plock.h
@@ -14,7 +14,7 @@
 #define DLM_PLOCK_MISC_NAME		"dlm_plock"
 
 #define DLM_PLOCK_VERSION_MAJOR	1
-#define DLM_PLOCK_VERSION_MINOR	1
+#define DLM_PLOCK_VERSION_MINOR	2
 #define DLM_PLOCK_VERSION_PATCH	0
 
 enum {
@@ -23,12 +23,14 @@ enum {
 	DLM_PLOCK_OP_GET,
 };
 
+#define DLM_PLOCK_FL_CLOSE 1
+
 struct dlm_plock_info {
 	__u32 version[3];
 	__u8 optype;
 	__u8 ex;
 	__u8 wait;
-	__u8 pad;
+	__u8 flags;
 	__u32 pid;
 	__s32 nodeid;
 	__s32 rv;