/*
 * OMAP hardware spinlock driver
 *
 * Copyright (C) 2010-2015 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Simon Que <sque@ti.com>
 *          Hari Kanigeri <h-kanigeri2@ti.com>
 *          Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "hwspinlock_internal.h"

/* Spinlock register offsets */
#define SYSSTATUS_OFFSET		0x0014
#define LOCK_BASE_OFFSET		0x0800

#define SPINLOCK_NUMLOCKS_BIT_OFFSET	(24)

/* Possible values of SPINLOCK_LOCK_REG */
#define SPINLOCK_NOTTAKEN		(0)	/* free */
#define SPINLOCK_TAKEN			(1)	/* locked */
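
/*
 * Register layout, as decoded by the probe routine below (descriptive note,
 * not part of the original driver): the field at SYSSTATUS bit 24 and above
 * is expected to be a one-hot value of 0x1/0x2/0x4/0x8, selecting 32/64/128/
 * 256 implemented locks. Each lock is backed by its own 32-bit register in
 * an array starting at LOCK_BASE_OFFSET.
 */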

static int omap_hwspinlock_trylock(struct hwspinlock *lock)
{
        void __iomem *lock_addr = lock->priv;

        /*
         * attempt to acquire the lock by reading its value: a read that
         * returns SPINLOCK_NOTTAKEN means the lock was free and the read
         * itself has taken it
         */
        return (SPINLOCK_NOTTAKEN == readl(lock_addr));
}

static void omap_hwspinlock_unlock(struct hwspinlock *lock)
{
        void __iomem *lock_addr = lock->priv;

        /* release the lock by writing 0 to it */
        writel(SPINLOCK_NOTTAKEN, lock_addr);
}

/*
 * relax the OMAP interconnect while spinning on it.
 *
 * The specs recommend that the retry delay be just over half of the
 * time that a requester would be expected to hold the lock.
 *
 * The number below is taken from a hardware specs example; obviously
 * it is somewhat arbitrary.
 */
static void omap_hwspinlock_relax(struct hwspinlock *lock)
{
        ndelay(50);
}

static const struct hwspinlock_ops omap_hwspinlock_ops = {
        .trylock = omap_hwspinlock_trylock,
        .unlock = omap_hwspinlock_unlock,
        .relax = omap_hwspinlock_relax,
};
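
/*
 * Usage sketch (illustrative, not part of this driver): clients never call
 * the ops above directly; they go through the hwspinlock core API. The lock
 * id 0 below is an arbitrary example that must match whatever the remote
 * processor has agreed to use.
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request_specific(0);
 *	int ret;
 *
 *	if (!hwlock)
 *		return -EBUSY;
 *	ret = hwspin_lock_timeout(hwlock, 10);	 (timeout in msecs)
 *	if (!ret) {
 *		 ... critical section shared with a remote processor ...
 *		hwspin_unlock(hwlock);
 *	}
 *	hwspin_lock_free(hwlock);
 */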

static int omap_hwspinlock_probe(struct platform_device *pdev)
{
        struct device_node *node = pdev->dev.of_node;
        struct hwspinlock_device *bank;
        struct hwspinlock *hwlock;
        struct resource *res;
        void __iomem *io_base;
        int num_locks, i, ret;
        /* Only a single hwspinlock block device is supported */
        int base_id = 0;

        if (!node)
                return -ENODEV;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        io_base = ioremap(res->start, resource_size(res));
        if (!io_base)
                return -ENOMEM;

        /*
         * make sure the module is enabled and clocked before reading
         * the module SYSSTATUS register
         */
        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(&pdev->dev);
                goto iounmap_base;
        }

        /* Determine number of locks */
        i = readl(io_base + SYSSTATUS_OFFSET);
        i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;

        /*
         * runtime PM will make sure the clock of this module is
         * enabled again iff at least one lock is requested
         */
        ret = pm_runtime_put(&pdev->dev);
        if (ret < 0)
                goto iounmap_base;

        /* one of the four lsb's must be set, and nothing else */
        if (hweight_long(i & 0xf) != 1 || i > 8) {
                ret = -EINVAL;
                goto iounmap_base;
        }

        num_locks = i * 32; /* actual number of locks in this device */

        bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
        if (!bank) {
                ret = -ENOMEM;
                goto iounmap_base;
        }

        platform_set_drvdata(pdev, bank);

        for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
                hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;

        ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
                                                base_id, num_locks);
        if (ret)
                goto reg_fail;

        return 0;

reg_fail:
        kfree(bank);
iounmap_base:
        pm_runtime_disable(&pdev->dev);
        iounmap(io_base);
        return ret;
}

static int omap_hwspinlock_remove(struct platform_device *pdev)
{
        struct hwspinlock_device *bank = platform_get_drvdata(pdev);
        void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
        int ret;

        ret = hwspin_lock_unregister(bank);
        if (ret) {
                dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
                return ret;
        }

        pm_runtime_disable(&pdev->dev);
        iounmap(io_base);
        kfree(bank);

        return 0;
}

static const struct of_device_id omap_hwspinlock_of_match[] = {
        { .compatible = "ti,omap4-hwspinlock", },
        { /* end */ },
};
MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
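
/*
 * Device tree sketch (illustrative only; the node name, unit address and
 * hwmod name below are assumptions based on OMAP4, not mandated by this
 * driver):
 *
 *	spinlock: spinlock@4a0f6000 {
 *		compatible = "ti,omap4-hwspinlock";
 *		reg = <0x4a0f6000 0x1000>;
 *		ti,hwmods = "spinlock";
 *		#hwlock-cells = <1>;
 *	};
 */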

static struct platform_driver omap_hwspinlock_driver = {
        .probe = omap_hwspinlock_probe,
        .remove = omap_hwspinlock_remove,
        .driver = {
                .name = "omap_hwspinlock",
                .of_match_table = of_match_ptr(omap_hwspinlock_of_match),
        },
};

static int __init omap_hwspinlock_init(void)
{
        return platform_driver_register(&omap_hwspinlock_driver);
}
/* board init code might need to reserve hwspinlocks for predefined purposes */
postcore_initcall(omap_hwspinlock_init);

static void __exit omap_hwspinlock_exit(void)
{
        platform_driver_unregister(&omap_hwspinlock_driver);
}
module_exit(omap_hwspinlock_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
MODULE_AUTHOR("Simon Que <sque@ti.com>");
MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");