hwspinlock/core: use a mutex to protect the radix tree
Since we're using non-atomic radix tree allocations, we should be
protecting the tree using a mutex and not a spinlock.

Non-atomic allocations and process-context locking are good enough,
as the tree is manipulated only when locks are registered/unregistered/
requested/freed. The locks themselves are still protected by spinlocks,
of course, and mutexes are not involved in the locking/unlocking paths.

Cc: <stable@kernel.org>
Signed-off-by: Juan Gutierrez <jgutierrez@ti.com>
[ohad@wizery.com: rewrite the commit log, #include mutex.h, add minor commentary]
[ohad@wizery.com: update register/unregister parts in hwspinlock.txt]
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
parent c3c1250e93
commit 93b465c2e1
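To make the change concrete, here is a minimal standalone sketch of the
pattern this patch adopts (an illustration with invented names such as
example_tree, not the kernel code itself): a radix tree initialized with
GFP_KERNEL may sleep while allocating nodes inside radix_tree_insert(),
so the lock serializing tree access must be a sleeping lock.

#include <linux/radix-tree.h>
#include <linux/mutex.h>

/* Illustrative only: GFP_KERNEL node allocations may sleep, so the
 * tree must be guarded by a mutex, never a spinlock. */
static RADIX_TREE(example_tree, GFP_KERNEL);
static DEFINE_MUTEX(example_tree_lock);

static int example_insert(unsigned long id, void *item)
{
        int ret;

        mutex_lock(&example_tree_lock);         /* process context only */
        ret = radix_tree_insert(&example_tree, id, item);
        mutex_unlock(&example_tree_lock);

        return ret;
}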
--- a/Documentation/hwspinlock.txt
+++ b/Documentation/hwspinlock.txt
@@ -39,23 +39,20 @@ independent, drivers.
     in case an unused hwspinlock isn't available. Users of this
     API will usually want to communicate the lock's id to the remote core
     before it can be used to achieve synchronization.
-    Can be called from an atomic context (this function will not sleep) but
-    not from within interrupt context.
+    Should be called from a process context (might sleep).
 
   struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
    - assign a specific hwspinlock id and return its address, or NULL
     if that hwspinlock is already in use. Usually board code will
     be calling this function in order to reserve specific hwspinlock
     ids for predefined purposes.
-    Can be called from an atomic context (this function will not sleep) but
-    not from within interrupt context.
+    Should be called from a process context (might sleep).
 
   int hwspin_lock_free(struct hwspinlock *hwlock);
    - free a previously-assigned hwspinlock; returns 0 on success, or an
     appropriate error code on failure (e.g. -EINVAL if the hwspinlock
     is already free).
-    Can be called from an atomic context (this function will not sleep) but
-    not from within interrupt context.
+    Should be called from a process context (might sleep).
 
   int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
    - lock a previously-assigned hwspinlock with a timeout limit (specified in
@@ -232,15 +229,14 @@ int hwspinlock_example2(void)
 
   int hwspin_lock_register(struct hwspinlock *hwlock);
    - to be called from the underlying platform-specific implementation, in
-    order to register a new hwspinlock instance. Can be called from an atomic
-    context (this function will not sleep) but not from within interrupt
-    context. Returns 0 on success, or appropriate error code on failure.
+    order to register a new hwspinlock instance. Should be called from
+    a process context (this function might sleep).
+    Returns 0 on success, or appropriate error code on failure.
 
   struct hwspinlock *hwspin_lock_unregister(unsigned int id);
    - to be called from the underlying vendor-specific implementation, in order
     to unregister an existing (and unused) hwspinlock instance.
-    Can be called from an atomic context (will not sleep) but not from
-    within interrupt context.
+    Should be called from a process context (this function might sleep).
     Returns the address of hwspinlock on success, or NULL on error (e.g.
     if the hwspinlock is sill in use).
 
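To illustrate the documented rules (this snippet is mine, not part of the
patch; example_caller is an invented name), a well-behaved user now
requests and frees locks in process context, while the lock/unlock fast
path stays atomic-safe:

#include <linux/hwspinlock.h>

/* Hypothetical caller sketch; error handling trimmed for brevity. */
static int example_caller(void)
{
        struct hwspinlock *hwlock;
        int ret;

        /* might sleep after this patch: process context only */
        hwlock = hwspin_lock_request();
        if (!hwlock)
                return -EBUSY;

        /* timeout in msecs; the locking path itself stays atomic-safe */
        ret = hwspin_lock_timeout(hwlock, 100);
        if (!ret)
                hwspin_unlock(hwlock);

        /* might sleep as well */
        hwspin_lock_free(hwlock);

        return ret;
}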
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -26,6 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>
 
 #include "hwspinlock_internal.h"
 
@@ -52,10 +53,12 @@
 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
 
 /*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
  * as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
  */
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);
 
 
 /**
  * __hwspin_trylock() - attempt to lock a specific hwspinlock
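A design note of mine, not the patch's: the alternative to switching lock
types would have been radix_tree_preload(), which preallocates tree nodes
in process context so that insertion under a spinlock never sleeps. A
sketch of that approach, with invented names (alt_tree, alt_insert):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(alt_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(alt_tree_lock);

static int alt_insert(unsigned long id, void *item)
{
        int ret;

        /* preallocate nodes while we may still sleep ... */
        ret = radix_tree_preload(GFP_KERNEL);
        if (ret)
                return ret;

        /* ... so insertion under the spinlock cannot sleep */
        spin_lock(&alt_tree_lock);
        ret = radix_tree_insert(&alt_tree, id, item);
        spin_unlock(&alt_tree_lock);

        radix_tree_preload_end();       /* pairs with radix_tree_preload() */
        return ret;
}

Since every tree manipulation here already runs in process context, the
mutex is the simpler choice.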
@@ -261,8 +264,7 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
  * This function should be called from the underlying platform-specific
  * implementation, to register a new hwspinlock instance.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -279,7 +281,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 
 	spin_lock_init(&hwlock->lock);
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
 	if (ret == -EEXIST)
@@ -295,7 +297,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 	WARN_ON(tmp != hwlock);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
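For illustration only (invented names; example_setup_one() and
example_teardown_one() are hypothetical helpers, not real API): with this
change, a platform driver must register instances from somewhere
sleepable, such as its probe() routine:

#include <linux/platform_device.h>
#include <linux/hwspinlock.h>

static int example_hwspinlock_probe(struct platform_device *pdev)
{
        struct hwspinlock *hwlock = example_setup_one(pdev);
        int ret;

        if (!hwlock)
                return -ENOMEM;

        ret = hwspin_lock_register(hwlock);     /* might sleep: takes the mutex */
        if (ret)
                example_teardown_one(hwlock);

        return ret;
}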
@@ -307,8 +309,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
  * This function should be called from the underlying platform-specific
  * implementation, to unregister an existing (and unused) hwspinlock.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of hwspinlock @id on success, or NULL on failure
  */
@@ -317,7 +318,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	struct hwspinlock *hwlock = NULL;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is not in use (tag is set) */
 	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -333,7 +334,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
 	}
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
@@ -402,9 +403,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  * to the remote core before it can be used for synchronization (to get the
  * id of a given hwlock, use hwspin_lock_get_id()).
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -413,7 +412,7 @@ struct hwspinlock *hwspin_lock_request(void)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* look for an unused lock */
 	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -433,7 +432,7 @@ struct hwspinlock *hwspin_lock_request(void)
 		hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request);
@@ -447,9 +446,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  * Usually early board code will be calling this function in order to
  * reserve specific hwspinlock ids for predefined purposes.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
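A hypothetical board-code sketch of the reservation pattern described
above (EXAMPLE_LOCK_ID and example_reserve_lock are invented):

#include <linux/hwspinlock.h>

#define EXAMPLE_LOCK_ID	7	/* invented: a board-reserved lock id */

/* Must run in process context after this patch. */
static struct hwspinlock *example_reserve_lock(void)
{
        /* returns NULL if the id is unknown or already in use */
        return hwspin_lock_request_specific(EXAMPLE_LOCK_ID);
}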
@@ -458,7 +455,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	struct hwspinlock *hwlock;
 	int ret;
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure this hwspinlock exists */
 	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -484,7 +481,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 		hwlock = NULL;
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
@@ -497,9 +494,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  * Should only be called with an @hwlock that was retrieved from
  * an earlier call to omap_hwspin_lock_request{_specific}.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -513,7 +508,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 		return -EINVAL;
 	}
 
-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);
 
 	/* make sure the hwspinlock is used */
 	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
@@ -540,7 +535,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
 	module_put(hwlock->dev->driver->owner);
 
 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_free);