/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE		0x01	/* Disable interrupts, save state */
#define HWLOCK_IRQ		0x02	/* Disable interrupts, don't save state */
#define HWLOCK_RAW		0x03
#define HWLOCK_IN_ATOMIC	0x04	/* Called while in atomic context */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
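
/*
 * Illustrative sketch (not part of this API): how board code might hand a
 * base id to a hwspinlock driver through platform data. The device name,
 * driver name and base id value below are assumptions for the example only.
 *
 *	static struct hwspinlock_pdata example_hwspinlock_pdata = {
 *		.base_id = 0,
 *	};
 *
 *	static struct platform_device example_hwspinlock_dev = {
 *		.name = "example-hwspinlock",
 *		.id = -1,
 *		.dev = {
 *			.platform_data = &example_hwspinlock_pdata,
 *		},
 *	};
 */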

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
							unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
int base_id, int num_locks);
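
/*
 * Illustrative sketch (not part of this API): how a consumer driver might
 * look up a lock id from its device tree node and request that specific
 * lock; the returned lock is released automatically on driver detach. The
 * function name, property index (0) and error handling policy are
 * assumptions for the example only.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct hwspinlock *hwlock;
 *		int id;
 *
 *		id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
 *		if (id < 0)
 *			return id;
 *
 *		hwlock = devm_hwspin_lock_request_specific(&pdev->dev, id);
 *		if (!hwlock)
 *			return -EBUSY;
 *
 *		return 0;
 *	}
 */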

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, users' code will still work.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
 * we _do_ want users to fail (no point in registering hwspinlock instances if
 * the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still check this with IS_ERR.
*/
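
/*
 * Illustrative sketch of the note above (the return values chosen are
 * assumptions for the example only): NULL-checking callers treat the
 * ERR_PTR(-ENODEV) stub value as success, while callers that really need
 * the framework can test the result with IS_ERR() instead:
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request();
 *
 *	if (IS_ERR(hwlock))
 *		return 0;
 *	if (!hwlock)
 *		return -EBUSY;
 */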

static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	return 0;
}

static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
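
/*
 * Illustrative sketch: pairing hwspin_trylock_irqsave() with
 * hwspin_unlock_irqrestore(). The work done while holding the lock is a
 * placeholder; only the lock/unlock calls come from this header.
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (ret)
 *		return ret;
 *
 *	... briefly touch the shared resource, without sleeping ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */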

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must serialize the code path that takes the hardware
 * lock with a mutex or a spinlock to avoid deadlocks; this is what makes it
 * safe to perform time-consuming or sleepable operations while the hardware
 * lock is held.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
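
/*
 * Illustrative sketch of the caution above: serializing local access with a
 * mutex before taking the lock in raw mode. The mutex name and the work done
 * under the lock are assumptions for the example only.
 *
 *	mutex_lock(&example_mutex);
 *	ret = hwspin_trylock_raw(hwlock);
 *	if (!ret) {
 *		... possibly lengthy or sleepable work ...
 *		hwspin_unlock_raw(hwlock);
 *	}
 *	mutex_unlock(&example_mutex);
 */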

/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}
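
/*
 * Illustrative sketch: the default trylock/unlock pair. Preemption stays
 * disabled between the two calls, so only a short, non-sleeping critical
 * section (represented by the placeholder below) belongs here.
 *
 *	ret = hwspin_trylock(hwlock);
 *	if (ret)
 *		return ret;
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock(hwlock);
 */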

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
				unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must serialize the code path that takes the hardware
 * lock with a mutex or a spinlock to avoid deadlocks; this is what makes it
 * safe to perform time-consuming or sleepable operations while the hardware
 * lock is held.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context and the timeout
 * value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
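
/*
 * Illustrative sketch: taking the lock with a bounded busy-wait. The 10 msec
 * timeout is an arbitrary value chosen for the example.
 *
 *	ret = hwspin_lock_timeout(hwlock, 10);
 *	if (ret)
 *		return ret;
 *
 *	... short, non-sleeping critical section ...
 *
 *	hwspin_unlock(hwlock);
 */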

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_in_atomic()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}
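
/*
 * Illustrative sketch of a full lifecycle using only the calls declared in
 * this header: request a free lock, publish its id to the remote side by
 * some out-of-band means, use it, then free it. The 5 msec timeout and the
 * error handling policy are assumptions for the example only.
 *
 *	struct hwspinlock *hwlock;
 *	int id, ret;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	id = hwspin_lock_get_id(hwlock);
 *	... share 'id' with the remote processor ...
 *
 *	ret = hwspin_lock_timeout(hwlock, 5);
 *	if (!ret) {
 *		... access the shared resource ...
 *		hwspin_unlock(hwlock);
 *	}
 *
 *	hwspin_lock_free(hwlock);
 */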

#endif /* __LINUX_HWSPINLOCK_H */