hwspinlock updates for v5.3
This contains support for the hardware spinlock in the TI K3 AM65x and
J721E family of SoCs, support for using hwspinlocks from atomic context,
and better error reporting when dealing with hardware disabled in
DeviceTree.

-----BEGIN PGP SIGNATURE-----

iQJPBAABCAA5FiEEBd4DzF816k8JZtUlCx85Pw2ZrcUFAl0uHkobHGJqb3JuLmFu
ZGVyc3NvbkBsaW5hcm8ub3JnAAoJEAsfOT8Nma3Fm0MP/ibvaJ82nOKumQGrOjom
CAI2U/nf1AWbm3Rhy+u3zVelKpd9iLKjsUj2fF42HBARZWXGfxEJUL7fH50Whp+n
lGDgD5r1vE+wp4aqHycRZJEKWUnKTLA/2ddK9QUfljtvdItZJ1MBoKu/V3nhELZd
wfK7vLVbeSIcK2ChVYFZtNhFRmRRN24K4RzcANzzxNrP7+xzqZuJEwmpKKibeMCN
M/LI0XcE95s4+O3e5mcTOOtEyGqbsNrwOwQquYVOFBNbzKHN2HN/9dPAXHA4/K0y
N8zseMeV52dTtaaK8tNLcY7XNJp8r326TN8N/ufmU8oO+KCj3cx1juBSeS4pze1m
U4+vFS9B8aRdeB6a6+W4ZdJpJVSJvTKDJMSB2vbvXJAQfklrGlaHQRmv+OBoPVMj
vUwWB10hJZMrONfGxN8KwG7wzFx7yIol2u2KmJcSfFVAJVVA/YBaulZDl6jkzyeB
GDrgrGnd7iYGa2LhPVHZ/xwyCb0WsRQMXx2gkNegxwlF+nnfTrNMKUJ9eTps4WUn
o80/khuegIzyieym9SB+f7BCRO1xC26FhcGuHrKJXK2Rfcf6LJv+b1B0Q+hwL8UQ
2DsImsH9VnWV7lIaXvbQh8Fsr361xuxJ2Lm3dszbtfEUuhLSyMuJvhyPO+FPSggZ
C51xGa/x71EqBWvKh2YheJSo
=Dz9r
-----END PGP SIGNATURE-----

Merge tag 'hwlock-v5.3' of git://github.com/andersson/remoteproc

Pull hwspinlock updates from Bjorn Andersson:

 "This contains support for the hardware spinlock in the TI K3 AM65x and
  J721E family of SoCs, support for using hwspinlocks from atomic context,
  and better error reporting when dealing with hardware disabled in
  DeviceTree"

* tag 'hwlock-v5.3' of git://github.com/andersson/remoteproc:
  hwspinlock: add the 'in_atomic' API
  hwspinlock: document the hwspinlock 'raw' API
  hwspinlock: stm32: implement the relax() ops
  hwspinlock: ignore disabled device
  hwspinlock/omap: Add a trace during probe
  hwspinlock/omap: Add support for TI K3 SoCs
  dt-bindings: hwlock: Update OMAP binding for TI K3 SoCs

This change landed as commit 57ab5f7402.
--- a/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt
+++ b/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt
@@ -1,12 +1,16 @@
-OMAP4+ HwSpinlock Driver
-========================
+TI HwSpinlock for OMAP and K3 based SoCs
+=========================================
 
 Required properties:
-- compatible:       Should be "ti,omap4-hwspinlock" for
-                    OMAP44xx, OMAP54xx, AM33xx, AM43xx, DRA7xx SoCs
+- compatible:       Should be one of the following,
+                      "ti,omap4-hwspinlock" for
+                        OMAP44xx, OMAP54xx, AM33xx, AM43xx, DRA7xx SoCs
+                      "ti,am654-hwspinlock" for
+                        K3 AM65x and J721E SoCs
 - reg:              Contains the hwspinlock module register address space
                     (base address and length)
 - ti,hwmods:        Name of the hwmod associated with the hwspinlock device
+                    (for OMAP architecture based SoCs only)
 - #hwlock-cells:    Should be 1. The OMAP hwspinlock users will use a
                     0-indexed relative hwlock number as the argument
                     specifier value for requesting a specific hwspinlock
@@ -17,10 +21,21 @@ Please look at the generic hwlock binding for usage information for consumers,
 
 Example:
 
-/* OMAP4 */
+1. OMAP4 SoCs
 hwspinlock: spinlock@4a0f6000 {
         compatible = "ti,omap4-hwspinlock";
         reg = <0x4a0f6000 0x1000>;
         ti,hwmods = "spinlock";
         #hwlock-cells = <1>;
 };
+
+2. AM65x SoCs and J721E SoCs
+&cbass_main {
+        cbass_main_navss: interconnect0 {
+                hwspinlock: spinlock@30e00000 {
+                        compatible = "ti,am654-hwspinlock";
+                        reg = <0x00 0x30e00000 0x00 0x1000>;
+                        #hwlock-cells = <1>;
+                };
+        };
+};
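For context, consumers reference these locks through the generic hwlock
binding's "hwlocks" property. A minimal consumer fragment is sketched
below; the client node and its name are illustrative, not part of this
patch:

/* hypothetical consumer node, for illustration only */
client: ipc@0 {
        ...
        hwlocks = <&hwspinlock 0>;      /* request lock 0 of this bank */
};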
--- a/Documentation/hwspinlock.txt
+++ b/Documentation/hwspinlock.txt
@@ -134,6 +134,39 @@ notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
 
 The function will never sleep.
 
+::
+
+  int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int timeout);
+
+Lock a previously-assigned hwspinlock with a timeout limit (specified in
+msecs). If the hwspinlock is already taken, the function will busy loop
+waiting for it to be released, but give up when the timeout elapses.
+
+Caution: the user must protect the routine that takes the hardware lock with
+a mutex or spinlock to avoid deadlock; this lets the user perform
+time-consuming or sleepable operations while holding the hardware lock.
+
+Returns 0 when successful and an appropriate error code otherwise (most
+notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
+
+The function will never sleep.
+
+::
+
+  int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to);
+
+Lock a previously-assigned hwspinlock with a timeout limit (specified in
+msecs). If the hwspinlock is already taken, the function will busy loop
+waiting for it to be released, but give up when the timeout elapses.
+
+This function shall be called only from an atomic context and the timeout
+value shall not exceed a few msecs.
+
+Returns 0 when successful and an appropriate error code otherwise (most
+notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).
+
+The function will never sleep.
+
 ::
 
   int hwspin_trylock(struct hwspinlock *hwlock);
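To make the caution above concrete, here is a minimal sketch (not from the
patch; bank_mutex and do_slow_update() are hypothetical) of serializing
local users with a mutex so that sleepable work can safely happen under the
raw hardware lock:

#include <linux/hwspinlock.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(bank_mutex);        /* serializes local users of the hwlock */

static int update_shared_table(struct hwspinlock *hwlock)
{
        int ret;

        mutex_lock(&bank_mutex);        /* local exclusion; may sleep */
        ret = hwspin_lock_timeout_raw(hwlock, 10);      /* 10 msecs */
        if (ret)
                goto out;               /* e.g. -ETIMEDOUT */

        do_slow_update();               /* hypothetical sleepable work,
                                           allowed in HWLOCK_RAW mode */

        hwspin_unlock_raw(hwlock);
out:
        mutex_unlock(&bank_mutex);
        return ret;
}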
@@ -184,6 +217,34 @@ Returns 0 on success and an appropriate error code otherwise (most
 notably -EBUSY if the hwspinlock was already taken).
 The function will never sleep.
 
+::
+
+  int hwspin_trylock_raw(struct hwspinlock *hwlock);
+
+Attempt to lock a previously-assigned hwspinlock, but immediately fail if
+it is already taken.
+
+Caution: the user must protect the routine that takes the hardware lock with
+a mutex or spinlock to avoid deadlock; this lets the user perform
+time-consuming or sleepable operations while holding the hardware lock.
+
+Returns 0 on success and an appropriate error code otherwise (most
+notably -EBUSY if the hwspinlock was already taken).
+The function will never sleep.
+
+::
+
+  int hwspin_trylock_in_atomic(struct hwspinlock *hwlock);
+
+Attempt to lock a previously-assigned hwspinlock, but immediately fail if
+it is already taken.
+
+This function shall be called only from an atomic context.
+
+Returns 0 on success and an appropriate error code otherwise (most
+notably -EBUSY if the hwspinlock was already taken).
+The function will never sleep.
+
 ::
 
   void hwspin_unlock(struct hwspinlock *hwlock);
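And a sketch of the atomic trylock variant: try once, back off on
contention, never sleep. The helper name and mailbox pointer below are
hypothetical, not part of this patch:

#include <linux/hwspinlock.h>
#include <linux/io.h>

/* hypothetical helper, safe to call from atomic context */
static bool try_post_message(struct hwspinlock *hwlock,
                             void __iomem *mbox, u32 msg)
{
        if (hwspin_trylock_in_atomic(hwlock))
                return false;           /* -EBUSY: a remote core holds it */

        writel(msg, mbox);              /* keep the critical section short */
        hwspin_unlock_in_atomic(hwlock);
        return true;
}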
@@ -220,6 +281,26 @@ Upon a successful return from this function, preemption is reenabled,
 and the state of the local interrupts is restored to the state saved at
 the given flags. This function will never sleep.
 
+::
+
+  void hwspin_unlock_raw(struct hwspinlock *hwlock);
+
+Unlock a previously-locked hwspinlock.
+
+The caller should **never** unlock an hwspinlock which is already unlocked.
+Doing so is considered a bug (there is no protection against this).
+This function will never sleep.
+
+::
+
+  void hwspin_unlock_in_atomic(struct hwspinlock *hwlock);
+
+Unlock a previously-locked hwspinlock.
+
+The caller should **never** unlock an hwspinlock which is already unlocked.
+Doing so is considered a bug (there is no protection against this).
+This function will never sleep.
+
 ::
 
   int hwspin_lock_get_id(struct hwspinlock *hwlock);
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -9,7 +9,7 @@ menuconfig HWSPINLOCK
 config HWSPINLOCK_OMAP
         tristate "OMAP Hardware Spinlock device"
         depends on HWSPINLOCK
-        depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX
+        depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3
         help
           Say y here to support the OMAP Hardware Spinlock device (firstly
           introduced in OMAP4).
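With the added ARCH_K3 dependency, a K3-based kernel configuration can now
select the driver. An illustrative fragment (not part of the patch):

CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_OMAP=m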
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -9,6 +9,7 @@
 
 #define pr_fmt(fmt)    "%s: " fmt, __func__
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
@@ -23,6 +24,9 @@
 
 #include "hwspinlock_internal.h"
 
+/* retry delay used in atomic context */
+#define HWSPINLOCK_RETRY_DELAY_US       100
+
 /* radix tree tags */
 #define HWSPINLOCK_UNUSED       (0) /* tags an hwspinlock as unused */
 
@@ -68,11 +72,11 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
  * user need some time-consuming or sleepable operations under the hardware
  * lock, they need one sleepable lock (like mutex) to protect the operations.
  *
- * If the mode is not HWLOCK_RAW, upon a successful return from this function,
- * preemption (and possibly interrupts) is disabled, so the caller must not
- * sleep, and is advised to release the hwspinlock as soon as possible. This is
- * required in order to minimize remote cores polling on the hardware
- * interconnect.
+ * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
+ * return from this function, preemption (and possibly interrupts) is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock as
+ * soon as possible. This is required in order to minimize remote cores polling
+ * on the hardware interconnect.
  *
  * The user decides whether local interrupts are disabled or not, and if yes,
  * whether he wants their previous state to be saved. It is up to the user
@@ -112,6 +116,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
                 ret = spin_trylock_irq(&hwlock->lock);
                 break;
         case HWLOCK_RAW:
+        case HWLOCK_IN_ATOMIC:
                 ret = 1;
                 break;
         default:
@@ -136,6 +141,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
                 spin_unlock_irq(&hwlock->lock);
                 break;
         case HWLOCK_RAW:
+        case HWLOCK_IN_ATOMIC:
                 /* Nothing to do */
                 break;
         default:
@@ -179,11 +185,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
  * user need some time-consuming or sleepable operations under the hardware
  * lock, they need one sleepable lock (like mutex) to protect the operations.
  *
- * If the mode is not HWLOCK_RAW, upon a successful return from this function,
- * preemption is disabled (and possibly local interrupts, too), so the caller
- * must not sleep, and is advised to release the hwspinlock as soon as possible.
- * This is required in order to minimize remote cores polling on the
- * hardware interconnect.
+ * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
+ * is handled with busy-waiting delays, hence shall not exceed a few msecs.
+ *
+ * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
+ * return from this function, preemption (and possibly interrupts) is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock as
+ * soon as possible. This is required in order to minimize remote cores polling
+ * on the hardware interconnect.
  *
  * The user decides whether local interrupts are disabled or not, and if yes,
  * whether he wants their previous state to be saved. It is up to the user
@@ -198,7 +207,7 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                         int mode, unsigned long *flags)
 {
         int ret;
-        unsigned long expire;
+        unsigned long expire, atomic_delay = 0;
 
         expire = msecs_to_jiffies(to) + jiffies;
 
@@ -212,8 +221,15 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                  * The lock is already taken, let's check if the user wants
                  * us to try again
                  */
-                if (time_is_before_eq_jiffies(expire))
-                        return -ETIMEDOUT;
+                if (mode == HWLOCK_IN_ATOMIC) {
+                        udelay(HWSPINLOCK_RETRY_DELAY_US);
+                        atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
+                        if (atomic_delay > to * 1000)
+                                return -ETIMEDOUT;
+                } else {
+                        if (time_is_before_eq_jiffies(expire))
+                                return -ETIMEDOUT;
+                }
 
                 /*
                  * Allow platform-specific relax handlers to prevent
@@ -276,6 +292,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
                 spin_unlock_irq(&hwlock->lock);
                 break;
         case HWLOCK_RAW:
+        case HWLOCK_IN_ATOMIC:
                 /* Nothing to do */
                 break;
         default:
@@ -333,6 +350,11 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
         if (ret)
                 return ret;
 
+        if (!of_device_is_available(args.np)) {
+                ret = -ENOENT;
+                goto out;
+        }
+
         /* Find the hwspinlock device: we need its base_id */
         ret = -EPROBE_DEFER;
         rcu_read_lock();
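A note on the accounting in the atomic branch above: atomic_delay
accumulates microseconds while the timeout "to" is in milliseconds, hence
the "to * 1000" comparison. A back-of-the-envelope helper (not in the
patch) showing the resulting retry budget:

#define HWSPINLOCK_RETRY_DELAY_US       100     /* as defined above */

/* e.g. to = 3 msecs -> 3000 us budget -> ~30 trylock retries */
static unsigned int max_atomic_retries(unsigned int to_msecs)
{
        return (to_msecs * 1000) / HWSPINLOCK_RETRY_DELAY_US;
}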
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -140,6 +140,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
         if (ret)
                 goto reg_fail;
 
+        dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
+                num_locks);
+
         return 0;
 
 reg_fail:
@@ -171,6 +174,7 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
 
 static const struct of_device_id omap_hwspinlock_of_match[] = {
         { .compatible = "ti,omap4-hwspinlock", },
+        { .compatible = "ti,am654-hwspinlock", },
         { /* end */ },
 };
 MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
--- a/drivers/hwspinlock/stm32_hwspinlock.c
+++ b/drivers/hwspinlock/stm32_hwspinlock.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/hwspinlock.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -42,9 +43,15 @@ static void stm32_hwspinlock_unlock(struct hwspinlock *lock)
         writel(STM32_MUTEX_COREID, lock_addr);
 }
 
+static void stm32_hwspinlock_relax(struct hwspinlock *lock)
+{
+        ndelay(50);
+}
+
 static const struct hwspinlock_ops stm32_hwspinlock_ops = {
         .trylock        = stm32_hwspinlock_trylock,
         .unlock         = stm32_hwspinlock_unlock,
+        .relax          = stm32_hwspinlock_relax,
 };
 
 static int stm32_hwspinlock_probe(struct platform_device *pdev)
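For context on why a 50 ns pause helps: between failed trylock attempts,
the hwspinlock core invokes the driver's optional relax() callback, so the
delay directly throttles how hard the CPU polls the HSEM registers. The
snippet below paraphrases the core's retry loop in __hwspin_lock_timeout()
and is not part of this diff:

/* paraphrased from the core's retry loop, for context only */
if (hwlock->bank->ops->relax)
        hwlock->bank->ops->relax(hwlock);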
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -14,9 +14,10 @@
 #include <linux/sched.h>
 
 /* hwspinlock mode argument */
 #define HWLOCK_IRQSTATE         0x01 /* Disable interrupts, save state */
 #define HWLOCK_IRQ              0x02 /* Disable interrupts, don't save state */
 #define HWLOCK_RAW              0x03
+#define HWLOCK_IN_ATOMIC        0x04 /* Called while in atomic context */
 
 struct device;
 struct device_node;
@@ -222,6 +223,23 @@ static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
         return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
 }
 
+/**
+ * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * This function shall be called only from an atomic context.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
+{
+        return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
 /**
  * hwspin_trylock() - attempt to lock a specific hwspinlock
  * @hwlock: an hwspinlock which we want to trylock
@@ -312,6 +330,28 @@ int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
         return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
 }
 
+/**
+ * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @timeout msecs have elapsed.
+ *
+ * This function shall be called only from an atomic context and the timeout
+ * value shall not exceed a few msecs.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
+{
+        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
+}
+
 /**
  * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
  * @hwlock: the hwspinlock to be locked
@@ -386,6 +426,21 @@ static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
         __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
 }
 
+/**
+ * hwspin_unlock_in_atomic() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
+{
+        __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
 /**
  * hwspin_unlock() - unlock hwspinlock
  * @hwlock: a previously-acquired hwspinlock which we want to unlock
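Putting the new inlines together, a hypothetical interrupt handler might
use the atomic API as sketched below; my_priv, the register write, and the
2 msec budget are illustrative, not from the patch:

#include <linux/hwspinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct my_priv {                        /* hypothetical driver state */
        struct hwspinlock *hwlock;
        void __iomem *shared_reg;
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct my_priv *priv = data;

        /* busy-waits up to ~2 msecs in 100 us steps; never sleeps */
        if (hwspin_lock_timeout_in_atomic(priv->hwlock, 2))
                return IRQ_NONE;

        writel(0x1, priv->shared_reg);  /* keep the locked window short */
        hwspin_unlock_in_atomic(priv->hwlock);

        return IRQ_HANDLED;
}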