Add down_timeout and change ACPI to use it
ACPI currently emulates a timeout for semaphores with calls to down_trylock and sleep. This produces horrible behaviour in terms of fairness and excessive wakeups. Now that we have a unified semaphore implementation, adding a real down_timeout is almost trivial.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
parent f06d968658
commit f1241c87a1
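Before the diff itself, a quick illustration of the primitive this patch introduces: down_timeout() behaves like down() but gives up after the given number of jiffies, returning 0 on success and -ETIME on timeout. The snippet below is a minimal usage sketch, not part of the patch; the semaphore and function names are made up for the example.

/* Illustrative only -- not from this patch.  Assumes the semaphore has
 * been initialised elsewhere with sema_init(&my_sem, 1). */
#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct semaphore my_sem;		/* hypothetical example semaphore */

static int claim_resource(unsigned int timeout_ms)
{
	/* Sleep until the semaphore is available or timeout_ms elapses. */
	int ret = down_timeout(&my_sem, msecs_to_jiffies(timeout_ms));

	if (ret)	/* -ETIME: not acquired within the timeout */
		return ret;

	/* ... critical section ... */

	up(&my_sem);
	return 0;
}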
@@ -4,6 +4,8 @@
  * Copyright (C) 2000 Andrew Henroid
  * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (c) 2008 Intel Corporation
+ *   Author: Matthew Wilcox <willy@linux.intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -37,15 +39,18 @@
 #include <linux/workqueue.h>
 #include <linux/nmi.h>
 #include <linux/acpi.h>
-#include <acpi/acpi.h>
-#include <asm/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/processor.h>
-#include <asm/uaccess.h>
-
 #include <linux/efi.h>
 #include <linux/ioport.h>
 #include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/semaphore.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/processor.h>

 #define _COMPONENT ACPI_OS_SERVICES
 ACPI_MODULE_NAME("osl");
@@ -764,7 +769,6 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
 {
 	struct semaphore *sem = NULL;

-
 	sem = acpi_os_allocate(sizeof(struct semaphore));
 	if (!sem)
 		return AE_NO_MEMORY;
@@ -791,12 +795,12 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
 {
 	struct semaphore *sem = (struct semaphore *)handle;

-
 	if (!sem)
 		return AE_BAD_PARAMETER;

 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

+	BUG_ON(!list_empty(&sem->wait_list));
 	kfree(sem);
 	sem = NULL;

@@ -804,21 +808,15 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
 }

 /*
- * TODO: The kernel doesn't have a 'down_timeout' function -- had to
- * improvise. The process is to sleep for one scheduler quantum
- * until the semaphore becomes available. Downside is that this
- * may result in starvation for timeout-based waits when there's
- * lots of semaphore activity.
- *
  * TODO: Support for units > 1?
  */
 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
 {
 	acpi_status status = AE_OK;
 	struct semaphore *sem = (struct semaphore *)handle;
+	long jiffies;
 	int ret = 0;
-

 	if (!sem || (units < 1))
 		return AE_BAD_PARAMETER;

@@ -828,58 +826,14 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
 			  handle, units, timeout));

-	/*
-	 * This can be called during resume with interrupts off.
-	 * Like boot-time, we should be single threaded and will
-	 * always get the lock if we try -- timeout or not.
-	 * If this doesn't succeed, then we will oops courtesy of
-	 * might_sleep() in down().
-	 */
-	if (!down_trylock(sem))
-		return AE_OK;
-
-	switch (timeout) {
-		/*
-		 * No Wait:
-		 * --------
-		 * A zero timeout value indicates that we shouldn't wait - just
-		 * acquire the semaphore if available otherwise return AE_TIME
-		 * (a.k.a. 'would block').
-		 */
-	case 0:
-		if (down_trylock(sem))
-			status = AE_TIME;
-		break;
-
-		/*
-		 * Wait Indefinitely:
-		 * ------------------
-		 */
-	case ACPI_WAIT_FOREVER:
-		down(sem);
-		break;
-
-		/*
-		 * Wait w/ Timeout:
-		 * ----------------
-		 */
-	default:
-		// TODO: A better timeout algorithm?
-		{
-			int i = 0;
-			static const int quantum_ms = 1000 / HZ;
-
-			ret = down_trylock(sem);
-			for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
-				schedule_timeout_interruptible(1);
-				ret = down_trylock(sem);
-			}
-
-			if (ret != 0)
-				status = AE_TIME;
-		}
-		break;
-	}
+	if (timeout == ACPI_WAIT_FOREVER)
+		jiffies = MAX_SCHEDULE_TIMEOUT;
+	else
+		jiffies = msecs_to_jiffies(timeout);
+
+	ret = down_timeout(sem, jiffies);
+	if (ret)
+		status = AE_TIME;

 	if (ACPI_FAILURE(status)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
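The hunk above (drivers/acpi/osl.c, acpi_os_wait_semaphore) replaces the whole trylock-and-sleep polling machinery with a single down_timeout() call; the only remaining work is mapping ACPI's 16-bit millisecond timeout onto jiffies. Restated as a standalone helper for clarity -- the helper name is hypothetical, the patch simply open-codes this logic:

/* Sketch of the timeout mapping used above; acpi_timeout_to_jiffies() is
 * a made-up name, not something this patch adds.  ACPI_WAIT_FOREVER and
 * MAX_SCHEDULE_TIMEOUT come from the ACPI and scheduler headers. */
static long acpi_timeout_to_jiffies(u16 timeout)
{
	if (timeout == ACPI_WAIT_FOREVER)	/* 0xFFFF: block indefinitely */
		return MAX_SCHEDULE_TIMEOUT;
	return msecs_to_jiffies(timeout);
}

Note that the new local is named jiffies, shadowing the global tick counter of the same name, but it holds a relative count here. A timeout of 0 reaches down_timeout() as 0 jiffies, which fails immediately with -ETIME if the semaphore is contended, preserving the old 'case 0: down_trylock()' behaviour; any failure is mapped to AE_TIME.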
@@ -902,7 +856,6 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
 {
 	struct semaphore *sem = (struct semaphore *)handle;

-
 	if (!sem || (units < 1))
 		return AE_BAD_PARAMETER;

@@ -74,6 +74,12 @@ extern int __must_check down_killable(struct semaphore *sem);
  */
 extern int __must_check down_trylock(struct semaphore *sem);

+/*
+ * As down(), except this function will return -ETIME if it fails to
+ * acquire the semaphore within the specified number of jiffies.
+ */
+extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
+
 /*
  * Release the semaphore. Unlike mutexes, up() may be called from any
  * context and even by tasks which have never called down().
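The declaration above sits in include/linux/semaphore.h next to down_interruptible() and down_killable(). Unlike those two, down_timeout() sleeps in TASK_UNINTERRUPTIBLE state (see __down_timeout() further down), so -ETIME is the only error it can return; signals do not cut the wait short.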
@@ -35,6 +35,7 @@
 static noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
 static noinline int __down_killable(struct semaphore *sem);
+static noinline int __down_timeout(struct semaphore *sem, long jiffies);
 static noinline void __up(struct semaphore *sem);

 void down(struct semaphore *sem)
@@ -104,6 +105,20 @@ int down_trylock(struct semaphore *sem)
 }
 EXPORT_SYMBOL(down_trylock);

+int down_timeout(struct semaphore *sem, long jiffies)
+{
+	unsigned long flags;
+	int result = 0;
+
+	spin_lock_irqsave(&sem->lock, flags);
+	if (unlikely(sem->count-- <= 0))
+		result = __down_timeout(sem, jiffies);
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(down_timeout);
+
 void up(struct semaphore *sem)
 {
 	unsigned long flags;
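In down_timeout() above (kernel/semaphore.c), the unlikely() test reads sem->count before the post-decrement: starting from a count of 1, the test sees 1, fails, and the caller owns the semaphore without sleeping; starting from 0, the test sees 0, the count goes negative, and the caller falls into __down_timeout() to wait on sem->wait_list. This is the same fast-path/slow-path shape as the existing down(), down_interruptible() and down_killable() wrappers; only the slow-path helper differs.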
@@ -142,10 +157,12 @@ static noinline void __sched __up_down_common(struct semaphore *sem)
 }

 /*
- * Because this function is inlined, the 'state' parameter will be constant,
- * and thus optimised away by the compiler.
+ * Because this function is inlined, the 'state' parameter will be
+ * constant, and thus optimised away by the compiler. Likewise the
+ * 'timeout' parameter for the cases without timeouts.
  */
-static inline int __sched __down_common(struct semaphore *sem, long state)
+static inline int __sched __down_common(struct semaphore *sem, long state,
+								long timeout)
 {
 	int result = 0;
 	struct task_struct *task = current;
@@ -160,14 +177,20 @@ static inline int __sched __down_common(struct semaphore *sem, long state)
 			goto interrupted;
 		if (state == TASK_KILLABLE && fatal_signal_pending(task))
 			goto interrupted;
+		if (timeout <= 0)
+			goto timed_out;
 		__set_task_state(task, state);
 		spin_unlock_irq(&sem->lock);
-		schedule();
+		timeout = schedule_timeout(timeout);
 		spin_lock_irq(&sem->lock);
 		if (waiter.up)
 			goto woken;
 	}

+ timed_out:
+	list_del(&waiter.list);
+	result = -ETIME;
+	goto woken;
 interrupted:
 	list_del(&waiter.list);
 	result = -EINTR;
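Two details of the __down_common() change are easy to miss. First, schedule_timeout() returns the number of jiffies remaining, and that value is written back into timeout, so a waiter that wakes before the semaphore is available sleeps again only for the remainder rather than restarting the full period; once timeout drops to zero or below, the new timed_out label removes the waiter from the list and returns -ETIME. Second, callers without a deadline pass MAX_SCHEDULE_TIMEOUT, and because __down_common() is inlined with constant arguments the 'timeout <= 0' branch is dead code for them and disappears, as the updated comment notes.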
@@ -187,17 +210,22 @@ static inline int __sched __down_common(struct semaphore *sem, long state)

 static noinline void __sched __down(struct semaphore *sem)
 {
-	__down_common(sem, TASK_UNINTERRUPTIBLE);
+	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }

 static noinline int __sched __down_interruptible(struct semaphore *sem)
 {
-	return __down_common(sem, TASK_INTERRUPTIBLE);
+	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }

 static noinline int __sched __down_killable(struct semaphore *sem)
 {
-	return __down_common(sem, TASK_KILLABLE);
+	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
+}
+
+static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
+{
+	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
 }

 static noinline void __sched __up(struct semaphore *sem)
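Taken together, a timed-out wait now flows from __down_common() returning -ETIME, through down_timeout(), to acpi_os_wait_semaphore(), which maps the error to AE_TIME. The waiter sleeps on the semaphore's wait list and is woken by up(), instead of retrying down_trylock() every scheduler quantum, which is what fixes the fairness and excess-wakeup problems described in the commit message.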