// SPDX-License-Identifier: GPL-2.0
/*
 * device.h - generic, centralized driver model
 *
 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2008-2009 Novell Inc.
 *
 * See Documentation/driver-api/driver-model/ for more information.
 */

#ifndef _DEVICE_H_
#define _DEVICE_H_

#include <linux/dev_printk.h>
#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/driver.h>
#include <asm/device.h>

struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct module;
struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;

/**
 * struct subsys_interface - interfaces to device functions
 * @name: name of the device function
 * @subsys: subsystem of the devices to attach to
 * @node: the list of functions registered at the subsystem
 * @add_dev: device hookup to device function handler
 * @remove_dev: device hookup to device function handler
 *
 * Simple interfaces attached to a subsystem. Multiple interfaces can
 * attach to a subsystem and its devices. Unlike drivers, they do not
 * exclusively claim or control devices. Interfaces usually represent
 * a specific functionality of a subsystem/class of devices.
 */
struct subsys_interface {
        const char *name;
        struct bus_type *subsys;
        struct list_head node;
        int (*add_dev)(struct device *dev, struct subsys_interface *sif);
        void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};

int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);

int subsys_system_register(struct bus_type *subsys,
                           const struct attribute_group **groups);
int subsys_virtual_register(struct bus_type *subsys,
                            const struct attribute_group **groups);
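
/*
 * Usage sketch (illustrative, not part of the API): a hypothetical "foo"
 * interface attached to the platform bus. The callback names, their bodies
 * and the bus choice are assumptions for the example; only the registration
 * calls above are real API.
 *
 *      static int foo_add_dev(struct device *dev, struct subsys_interface *sif)
 *      {
 *              return 0;       // called once per device on the subsystem
 *      }
 *
 *      static void foo_remove_dev(struct device *dev, struct subsys_interface *sif)
 *      {
 *      }
 *
 *      static struct subsys_interface foo_interface = {
 *              .name       = "foo",
 *              .subsys     = &platform_bus_type,
 *              .add_dev    = foo_add_dev,
 *              .remove_dev = foo_remove_dev,
 *      };
 *
 *      err = subsys_interface_register(&foo_interface);
 */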

/*
 * The type of device that "struct device" is embedded in. A class
 * or bus can contain devices of different types, such as "partitions"
 * and "disks", or "mouse" and "event".
 * This identifies the device type and carries type-specific
 * information, equivalent to the kobj_type of a kobject.
 * If "name" is specified, the uevent will contain it in
 * the DEVTYPE variable.
 */
struct device_type {
        const char *name;
        const struct attribute_group **groups;
        int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
        char *(*devnode)(struct device *dev, umode_t *mode,
                         kuid_t *uid, kgid_t *gid);
        void (*release)(struct device *dev);

        const struct dev_pm_ops *pm;
};
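
/*
 * Illustrative sketch (assumed names, not part of this header): a minimal
 * device_type whose release() frees the enclosing structure. Because .name
 * is set, uevents for such devices carry DEVTYPE=foo_device.
 *
 *      static void foo_device_release(struct device *dev)
 *      {
 *              kfree(container_of(dev, struct foo_device, dev));
 *      }
 *
 *      static const struct device_type foo_device_type = {
 *              .name    = "foo_device",
 *              .release = foo_device_release,
 *      };
 */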

/* interface for exporting device attributes */
struct device_attribute {
        struct attribute attr;
        ssize_t (*show)(struct device *dev, struct device_attribute *attr,
                        char *buf);
        ssize_t (*store)(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count);
};

struct dev_ext_attribute {
        struct device_attribute attr;
        void *var;
};

ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
                          char *buf);
ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count);
ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
                        char *buf);
ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
                         char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count);

#define DEVICE_ATTR(_name, _mode, _show, _store) \
        struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
        struct device_attribute dev_attr_##_name = \
                __ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
        struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_ADMIN_RW(_name) \
        struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
#define DEVICE_ATTR_RO(_name) \
        struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
#define DEVICE_ATTR_ADMIN_RO(_name) \
        struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
#define DEVICE_ATTR_WO(_name) \
        struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
        struct dev_ext_attribute dev_attr_##_name = \
                { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
#define DEVICE_INT_ATTR(_name, _mode, _var) \
        struct dev_ext_attribute dev_attr_##_name = \
                { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
        struct dev_ext_attribute dev_attr_##_name = \
                { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
        struct device_attribute dev_attr_##_name = \
                __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)

int device_create_file(struct device *device,
                       const struct device_attribute *entry);
void device_remove_file(struct device *dev,
                        const struct device_attribute *attr);
bool device_remove_file_self(struct device *dev,
                             const struct device_attribute *attr);
int __must_check device_create_bin_file(struct device *dev,
                                        const struct bin_attribute *attr);
void device_remove_bin_file(struct device *dev,
                            const struct bin_attribute *attr);
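
/*
 * Usage sketch (illustrative only): a read-only sysfs attribute declared
 * with DEVICE_ATTR_RO() and created explicitly. The "foo" name and the
 * value shown are assumptions; attribute groups are usually preferred over
 * device_create_file() because they avoid races with uevents.
 *
 *      static ssize_t foo_show(struct device *dev,
 *                              struct device_attribute *attr, char *buf)
 *      {
 *              return sysfs_emit(buf, "%d\n", 42);
 *      }
 *      static DEVICE_ATTR_RO(foo);
 *
 *      err = device_create_file(dev, &dev_attr_foo);
 *      ...
 *      device_remove_file(dev, &dev_attr_foo);
 */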

/* device resource management */
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);

void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
                          int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
        __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
        __devres_alloc_node(release, size, gfp, nid, #release)
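
/*
 * Usage sketch (illustrative only): allocating a custom devres record and
 * queueing it on a device so its release callback runs automatically on
 * driver detach. The "foo" names are assumptions for the example.
 *
 *      static void foo_res_release(struct device *dev, void *res)
 *      {
 *              struct foo_res *fr = res;
 *              // undo whatever the resource represents
 *      }
 *
 *      struct foo_res *fr = devres_alloc(foo_res_release, sizeof(*fr), GFP_KERNEL);
 *      if (!fr)
 *              return -ENOMEM;
 *      devres_add(dev, fr);
 */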

void devres_for_each_res(struct device *dev, dr_release_t release,
                         dr_match_t match, void *match_data,
                         void (*fn)(struct device *, void *, void *),
                         void *data);
void devres_free(void *res);
void devres_add(struct device *dev, void *res);
void *devres_find(struct device *dev, dr_release_t release,
                  dr_match_t match, void *match_data);
void *devres_get(struct device *dev, void *new_res,
                 dr_match_t match, void *match_data);
void *devres_remove(struct device *dev, dr_release_t release,
                    dr_match_t match, void *match_data);
int devres_destroy(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data);
int devres_release(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data);

/* devres group */
void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
void devres_close_group(struct device *dev, void *id);
void devres_remove_group(struct device *dev, void *id);
int devres_release_group(struct device *dev, void *id);

/* managed devm_k.alloc/kfree for device drivers */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
void *devm_krealloc(struct device *dev, void *ptr, size_t size,
                    gfp_t gfp) __must_check;
__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
                                     const char *fmt, va_list ap) __malloc;
__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
                                    const char *fmt, ...) __malloc;
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
        return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
}
static inline void *devm_kmalloc_array(struct device *dev,
                                       size_t n, size_t size, gfp_t flags)
{
        size_t bytes;

        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;

        return devm_kmalloc(dev, bytes, flags);
}
static inline void *devm_kcalloc(struct device *dev,
                                 size_t n, size_t size, gfp_t flags)
{
        return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
void devm_kfree(struct device *dev, const void *p);
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
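
/*
 * Usage sketch (illustrative only): typical devm_kzalloc() use in probe();
 * the allocation is released automatically on driver detach, so no kfree()
 * is needed on error or remove paths. "struct foo_priv" is an assumption.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct foo_priv *priv;
 *
 *              priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *              if (!priv)
 *                      return -ENOMEM;
 *              platform_set_drvdata(pdev, priv);
 *              return 0;
 *      }
 */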

unsigned long devm_get_free_pages(struct device *dev,
                                  gfp_t gfp_mask, unsigned int order);
void devm_free_pages(struct device *dev, unsigned long addr);

void __iomem *devm_ioremap_resource(struct device *dev,
                                    const struct resource *res);
void __iomem *devm_ioremap_resource_wc(struct device *dev,
                                       const struct resource *res);

void __iomem *devm_of_iomap(struct device *dev,
                            struct device_node *node, int index,
                            resource_size_t *size);
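
/*
 * Usage sketch (illustrative only): mapping a platform MMIO region;
 * devm_ioremap_resource() requests and maps the resource and returns an
 * ERR_PTR() value on failure. The resource index 0 is an assumption.
 *
 *      struct resource *res;
 *      void __iomem *base;
 *
 *      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *      base = devm_ioremap_resource(&pdev->dev, res);
 *      if (IS_ERR(base))
 *              return PTR_ERR(base);
 */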

/* allows adding/removing a custom action to/from the devres stack */
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
void devm_release_action(struct device *dev, void (*action)(void *), void *data);

static inline int devm_add_action_or_reset(struct device *dev,
                                           void (*action)(void *), void *data)
{
        int ret;

        ret = devm_add_action(dev, action, data);
        if (ret)
                action(data);

        return ret;
}
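
/*
 * Usage sketch (illustrative only): registering a cleanup action so it runs
 * on driver detach; if registration fails, the helper above invokes the
 * action immediately and returns the error. The clock handling is an
 * assumed example.
 *
 *      static void foo_clk_disable(void *data)
 *      {
 *              clk_disable_unprepare(data);
 *      }
 *
 *      ret = clk_prepare_enable(clk);
 *      if (ret)
 *              return ret;
 *      ret = devm_add_action_or_reset(dev, foo_clk_disable, clk);
 *      if (ret)
 *              return ret;
 */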

/**
 * devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @type: Type to allocate per-cpu memory for
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
#define devm_alloc_percpu(dev, type) \
        ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
                                                      __alignof__(type)))

void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
                                   size_t align);
void devm_free_percpu(struct device *dev, void __percpu *pdata);
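
/*
 * Usage sketch (illustrative only): per-CPU counters allocated with
 * devm_alloc_percpu() are freed automatically on driver detach.
 * "struct foo_stats" and its field are assumptions for the example.
 *
 *      struct foo_stats __percpu *stats;
 *
 *      stats = devm_alloc_percpu(dev, struct foo_stats);
 *      if (!stats)
 *              return -ENOMEM;
 *      this_cpu_inc(stats->rx_packets);
 */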

struct device_dma_parameters {
        /*
         * A low-level driver may set these to teach the IOMMU code
         * about scatter/gather limitations.
         */
        unsigned int max_segment_size;
        unsigned int min_align_mask;
        unsigned long segment_boundary_mask;
};

/**
 * enum device_link_state - Device link states.
 * @DL_STATE_NONE: The presence of the drivers is not being tracked.
 * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
 * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
 * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
 * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
 * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
 */
enum device_link_state {
        DL_STATE_NONE = -1,
        DL_STATE_DORMANT = 0,
        DL_STATE_AVAILABLE,
        DL_STATE_CONSUMER_PROBE,
        DL_STATE_ACTIVE,
        DL_STATE_SUPPLIER_UNBIND,
};

/*
 * Device link flags.
 *
 * STATELESS: The core will not remove this link automatically.
 * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
 * PM_RUNTIME: If set, the runtime PM framework will use this link.
 * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
 * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
 * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
 * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
 * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
 * INFERRED: Inferred from data (eg: firmware) and not from driver actions.
 */
#define DL_FLAG_STATELESS               BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER     BIT(1)
#define DL_FLAG_PM_RUNTIME              BIT(2)
#define DL_FLAG_RPM_ACTIVE              BIT(3)
#define DL_FLAG_AUTOREMOVE_SUPPLIER     BIT(4)
#define DL_FLAG_AUTOPROBE_CONSUMER      BIT(5)
#define DL_FLAG_MANAGED                 BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY         BIT(7)
#define DL_FLAG_INFERRED                BIT(8)
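
/*
 * Usage sketch (illustrative only): a consumer creating a managed link to
 * its supplier so that PM and unbind ordering follow the dependency.
 * device_link_add() is declared further down in this header; the flag
 * combination shown is just one common choice.
 *
 *      struct device_link *link;
 *
 *      link = device_link_add(consumer_dev, supplier_dev,
 *                             DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
 *      if (!link)
 *              return -EINVAL;
 */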
|
driver core: Functional dependencies tracking support
Currently, there is a problem with taking functional dependencies
between devices into account.
What I mean by a "functional dependency" is when the driver of device
B needs device A to be functional and (generally) its driver to be
present in order to work properly. This has certain consequences
for power management (suspend/resume and runtime PM ordering) and
shutdown ordering of these devices. In general, it also implies that
the driver of A needs to be working for B to be probed successfully
and it cannot be unbound from the device before the B's driver.
Support for representing those functional dependencies between
devices is added here to allow the driver core to track them and act
on them in certain cases where applicable.
The argument for doing that in the driver core is that there are
quite a few distinct use cases involving device dependencies, they
are relatively hard to get right in a driver (if one wants to
address all of them properly) and it only gets worse if multiplied
by the number of drivers potentially needing to do it. Morever, at
least one case (asynchronous system suspend/resume) cannot be handled
in a single driver at all, because it requires the driver of A to
wait for B to suspend (during system suspend) and the driver of B to
wait for A to resume (during system resume).
For this reason, represent dependencies between devices as "links",
with the help of struct device_link objects each containing pointers
to the "linked" devices, a list node for each of them, status
information, flags, and an RCU head for synchronization.
Also add two new list heads, representing the lists of links to the
devices that depend on the given one (consumers) and to the devices
depended on by it (suppliers), and a "driver presence status" field
(needed for figuring out initial states of device links) to struct
device.
The entire data structure consisting of all of the lists of link
objects for all devices is protected by a mutex (for link object
addition/removal and for list walks during device driver probing
and removal) and by SRCU (for list walking in other case that will
be introduced by subsequent change sets). If CONFIG_SRCU is not
selected, however, an rwsem is used for protecting the entire data
structure.
In addition, each link object has an internal status field whose
value reflects whether or not drivers are bound to the devices
pointed to by the link or probing/removal of their drivers is in
progress etc. That field is only modified under the device links
mutex, but it may be read outside of it in some cases (introduced by
subsequent change sets), so modifications of it are annotated with
WRITE_ONCE().
New links are added by calling device_link_add() which takes three
arguments: pointers to the devices in question and flags. In
particular, if DL_FLAG_STATELESS is set in the flags, the link status
is not to be taken into account for this link and the driver core
will not manage it. In turn, if DL_FLAG_AUTOREMOVE is set in the
flags, the driver core will remove the link automatically when the
consumer device driver unbinds from it.
One of the actions carried out by device_link_add() is to reorder
the lists used for device shutdown and system suspend/resume to
put the consumer device along with all of its children and all of
its consumers (and so on, recursively) to the ends of those lists
in order to ensure the right ordering between all of the supplier
and consumer devices.
For this reason, it is not possible to create a link between two
devices if the would-be supplier device already depends on the
would-be consumer device as either a direct descendant of it or a
consumer of one of its direct descendants or one of its consumers
and so on.
There are two types of link objects, persistent and non-persistent.
The persistent ones stay around until one of the target devices is
deleted, while the non-persistent ones are removed automatically when
the consumer driver unbinds from its device (ie. they are assumed to
be valid only as long as the consumer device has a driver bound to
it). Persistent links are created by default and non-persistent
links are created when the DL_FLAG_AUTOREMOVE flag is passed
to device_link_add().
Both persistent and non-persistent device links can be deleted
with an explicit call to device_link_del().
Links created without the DL_FLAG_STATELESS flag set are managed
by the driver core using a simple state machine. There are 5 states
each link can be in: DORMANT (unused), AVAILABLE (the supplier driver
is present and functional), CONSUMER_PROBE (the consumer driver is
probing), ACTIVE (both supplier and consumer drivers are present and
functional), and SUPPLIER_UNBIND (the supplier driver is unbinding).
The driver core updates the link state automatically depending on
what happens to the linked devices and for each link state specific
actions are taken in addition to that.
For example, if the supplier driver unbinds from its device, the
driver core will also unbind the drivers of all of its consumers
automatically under the assumption that they cannot function
properly without the supplier. Analogously, the driver core will
only allow the consumer driver to bind to its device if the
supplier driver is present and functional (i.e. the link is in
the AVAILABLE state). If that's not the case, it will rely on
the existing deferred probing mechanism to wait for the supplier
driver to become available.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
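
As a rough sketch of the interface described above (not taken from the change
itself; the probe function and the way the supplier pointer is obtained are
hypothetical), a consumer driver could register its dependency during probe
and let the driver core drop the link again when the consumer unbinds:

#include <linux/device.h>

static int consumer_probe(struct device *dev, struct device *supplier)
{
        struct device_link *link;

        /*
         * Record that *dev* depends on *supplier*.  With DL_FLAG_AUTOREMOVE
         * (called DL_FLAG_AUTOREMOVE_CONSUMER in later kernels) the driver
         * core deletes the link when this consumer driver unbinds; a
         * stateless link would use DL_FLAG_STATELESS and an explicit
         * device_link_del() instead.
         */
        link = device_link_add(dev, supplier, DL_FLAG_AUTOREMOVE);
        if (!link)
                return -EINVAL; /* e.g. it would invert an existing dependency */

        /* ...normal probe work... */
        return 0;
}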

/**
 * enum dl_dev_state - Device driver presence tracking information.
 * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
 * @DL_DEV_PROBING: A driver is probing.
 * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
 * @DL_DEV_UNBINDING: The driver is unbinding from the device.
 */
enum dl_dev_state {
        DL_DEV_NO_DRIVER = 0,
        DL_DEV_PROBING,
        DL_DEV_DRIVER_BOUND,
        DL_DEV_UNBINDING,
};

/**
 * enum device_removable - Whether the device is removable. The criteria for a
 * device to be classified as removable is determined by its subsystem or bus.
 * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
 *                                  device (default).
 * @DEVICE_REMOVABLE_UNKNOWN: Device location is Unknown.
 * @DEVICE_FIXED: Device is not removable by the user.
 * @DEVICE_REMOVABLE: Device is removable by the user.
 */
enum device_removable {
        DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
        DEVICE_REMOVABLE_UNKNOWN,
        DEVICE_FIXED,
        DEVICE_REMOVABLE,
};

/**
 * struct dev_links_info - Device data related to device links.
 * @suppliers: List of links to supplier devices.
 * @consumers: List of links to consumer devices.
 * @defer_sync: Hook to global list of devices that have deferred sync_state.
 * @status: Driver status information.
 */
struct dev_links_info {
        struct list_head suppliers;
        struct list_head consumers;
        struct list_head defer_sync;
        enum dl_dev_state status;
};

/**
 * struct device - The basic device structure
 * @parent:     The device's "parent" device, the device to which it is attached.
 *              In most cases, a parent device is some sort of bus or host
 *              controller. If parent is NULL, the device is a top-level device,
 *              which is not usually what you want.
 * @p:          Holds the private data of the driver core portions of the device.
 *              See the comment of the struct device_private for detail.
 * @kobj:       A top-level, abstract class from which other classes are derived.
 * @init_name:  Initial name of the device.
 * @type:       The type of device.
 *              This identifies the device type and carries type-specific
 *              information.
 * @mutex:      Mutex to synchronize calls to its driver.
 * @lockdep_mutex: An optional debug lock that a subsystem can use as a
 *              peer lock to gain localized lockdep coverage of the device_lock.
 * @bus:        Type of bus device is on.
 * @driver:     Which driver has allocated this
 * @platform_data: Platform data specific to the device.
 *              Example: For devices on custom boards, as typical of embedded
 *              and SOC based hardware, Linux often uses platform_data to point
 *              to board-specific structures describing devices and how they
 *              are wired. That can include what ports are available, chip
 *              variants, which GPIO pins act in what additional roles, and so
 *              on. This shrinks the "Board Support Packages" (BSPs) and
 *              minimizes board-specific #ifdefs in drivers.
 * @driver_data: Private pointer for driver specific info.
 * @links:      Links to suppliers and consumers of this device.
 * @power:      For device power management.
 *              See Documentation/driver-api/pm/devices.rst for details.
 * @pm_domain:  Provide callbacks that are executed during system suspend,
 *              hibernation, system resume and during runtime PM transitions
 *              along with subsystem-level and driver-level callbacks.
 * @em_pd:      device's energy model performance domain

drivers/pinctrl: grab default handles from device core
This makes the device core auto-grab the pinctrl handle and set
the "default" (PINCTRL_STATE_DEFAULT) state for every device
that is present in the device model right before probe. This will
account for the lion's share of embedded silicon devices.
A modification of the semantics for pinctrl_get() is also done:
previously if the pinctrl handle for a certain device was already
taken, the pinctrl core would return an error. Now, since the
core may have already default-grabbed the handle and set its
state to "default", if the handle was already taken, this will
be disregarded and the located, previously instantiated handle
will be returned to the caller.
This way all code in drivers explicitly requesting their pinctrl
handles will still be functional, and drivers that want to
explicitly retrieve and switch their handles can still do that.
But if the desired functionality is just boilerplate of this
type in the probe() function:
        struct pinctrl *p;
        p = devm_pinctrl_get_select_default(&dev);
        if (IS_ERR(p)) {
                if (PTR_ERR(p) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                dev_warn(&dev, "no pinctrl handle\n");
        }
The discussion began with the addition of such boilerplate
to the omap4 keypad driver:
http://marc.info/?l=linux-input&m=135091157719300&w=2
A previous approach using notifiers was discussed:
http://marc.info/?l=linux-kernel&m=135263661110528&w=2
This failed because it could not handle deferred probes.
This patch alone does not solve the entire dilemma faced:
whether code should be distributed into the drivers or
if it should be centralized to e.g. a PM domain. But it
solves the immediate issue of the addition of boilerplate
to a lot of drivers that just want to grab the default
state. As mentioned, they can later explicitly retrieve
the handle and set different states, and this could as
well be done by e.g. PM domains as it is only related
to a certain struct device * pointer.
ChangeLog v4->v5 (Stephen):
- Simplified the devicecore grab code.
- Deleted a piece of documentation recommending that pins
be mapped to a device rather than hogged.
ChangeLog v3->v4 (Linus):
- Drop overzealous NULL checks.
- Move kref initialization to pinctrl_create().
- Seeking Tested-by from Stephen Warren so we do not disturb
the Tegra platform.
- Seeking ACK on this from Greg (and others who like it) so I
can merge it through the pinctrl subsystem.
ChangeLog v2->v3 (Linus):
- Abstain from using IS_ERR_OR_NULL() in the driver core,
Russell recently sent a patch to remove it. Handle the
NULL case explicitly even though it's a bogus case.
- Make sure we handle probe deferral correctly in the device
core file. devm_kfree() the container on error so we don't
waste memory for devices without pinctrl handles.
- Introduce reference counting into the pinctrl core using
<linux/kref.h> so that we don't release pinctrl handles
that have been obtained for two or more places.
ChangeLog v1->v2 (Linus):
- Only store a pointer in the device struct, and only allocate
this if it's really used by the device.
Cc: Felipe Balbi <balbi@ti.com>
Cc: Benoit Cousson <b-cousson@ti.com>
Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: Mitch Bradley <wmb@firmworks.com>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
Cc: Rickard Andersson <rickard.andersson@stericsson.com>
Cc: Russell King <linux@arm.linux.org.uk>
Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
[swarren: fixed and simplified error-handling in pinctrl_bind_pins(), to
correctly handle deferred probe. Removed admonition from docs not to use
pinctrl hogs for devices]
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
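
For drivers that do want to retrieve and switch states explicitly, a minimal
sketch using only the public pinctrl consumer API might look like this (the
function name and the choice of the "sleep" state are illustrative, not part
of this change):

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int example_enter_sleep_pins(struct device *dev)
{
        struct pinctrl *p;
        struct pinctrl_state *sleep;

        /* Returns the handle the core may already have grabbed at probe. */
        p = devm_pinctrl_get(dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        sleep = pinctrl_lookup_state(p, PINCTRL_STATE_SLEEP);
        if (IS_ERR(sleep))
                return PTR_ERR(sleep);

        /* Switch away from the "default" state selected by the core. */
        return pinctrl_select_state(p, sleep);
}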
 * @pins:       For device pin management.
 *              See Documentation/driver-api/pin-control.rst for details.
 * @msi_lock:   Lock to protect MSI mask cache and mask register
 * @msi_list:   Hosts MSI descriptors
 * @msi_domain: The generic MSI domain this device is using.
 * @numa_node:  NUMA node this device is close to.
 * @dma_ops:    DMA mapping operations for this device.
 * @dma_mask:   Dma mask (if dma'ble device).
 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
 *              hardware supports 64-bit addresses for consistent allocations
 *              of such descriptors.
 * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
 *              DMA limit than the device itself supports.
 * @dma_range_map: map for DMA memory ranges relative to that of RAM
 * @dma_parms:  A low level driver may set these to teach IOMMU code about
 *              segment limitations.
 * @dma_pools:  Dma pools (if dma'ble device).
 * @dma_mem:    Internal for coherent mem override.
 * @cma_area:   Contiguous memory area for dma allocations
 * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
 * @archdata:   For arch-specific additions.
 * @of_node:    Associated device tree node.
 * @fwnode:     Associated device node supplied by platform firmware.
 * @devt:       For creating the sysfs "dev".
 * @id:         device instance
 * @devres_lock: Spinlock to protect the resource of the device.
 * @devres_head: The resources list of the device.
 * @knode_class: The node used to add the device to the class list.
 * @class:      The class of the device.
 * @groups:     Optional attribute groups.
 * @release:    Callback to free the device after all references have
 *              gone away. This should be set by the allocator of the
 *              device (i.e. the bus driver that discovered the device).
 * @iommu_group: IOMMU group the device belongs to.
 * @iommu:      Per device generic IOMMU runtime data
 * @removable:  Whether the device can be removed from the system. This
 *              should be set by the subsystem / bus driver that discovered
 *              the device.
 *

Driver core: Add offline/online device operations
In some cases, graceful hot-removal of devices is not possible,
although in principle the devices in question support hotplug.
For example, that may happen for the last CPU in the system or
for memory modules holding kernel memory.
In those cases it is nice to be able to check if the given device
can be gracefully hot-removed before triggering a removal procedure
that cannot be aborted or reversed. Unfortunately, however, the
kernel currently doesn't provide any support for that.
To address that deficiency, introduce support for offline and
online operations that can be performed on devices, respectively,
before a hot-removal and in case when it is necessary (or convenient)
to put a device back online after a successful offline (that has not
been followed by removal). The idea is that the offline will fail
whenever the given device cannot be gracefully removed from the
system and it will not be allowed to use the device after a
successful offline (until a subsequent online) in analogy with the
existing CPU offline/online mechanism.
For now, the offline and online operations are introduced at the
bus type level, as that should be sufficient for the most urgent use
cases (CPUs and memory modules). In the future, however, the
approach may be extended to cover some more complicated device
offline/online scenarios involving device drivers etc.
The lock_device_hotplug() and unlock_device_hotplug() functions are
introduced because subsequent patches need to put larger pieces of
code under device_hotplug_lock to prevent race conditions between
device offline and removal from happening.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Toshi Kani <toshi.kani@hp.com>
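
A bus that opts in would simply wire the two callbacks into its struct
bus_type; the sketch below is only an illustration (the bus name and the busy
check are invented), not code from this change:

#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical check provided elsewhere by the bus implementation. */
extern bool example_device_busy(struct device *dev);

static int example_bus_offline(struct device *dev)
{
        /*
         * Return 0 only if the device can really be taken out of service;
         * a negative errno makes the offline attempt fail and keeps the
         * device online.
         */
        return example_device_busy(dev) ? -EBUSY : 0;
}

static int example_bus_online(struct device *dev)
{
        /* Undo whatever offline did and make the device usable again. */
        return 0;
}

static struct bus_type example_bus_type = {
        .name           = "example",
        .online         = example_bus_online,
        .offline        = example_bus_offline,
};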
 * @offline_disabled: If set, the device is permanently online.
 * @offline:    Set after successful invocation of bus type's .offline().

driver core: add helper to reuse a device-tree node
Add a helper function to be used when reusing the device-tree node of
another device.
It is fairly common for drivers to reuse the device-tree node of a
parent (or other ancestor) device when creating class or bus devices
(e.g. gpio chips, i2c adapters, iio chips, spi masters, serdev, phys,
usb root hubs). But reusing a device-tree node may cause problems if the
new device is later probed as for example driver core would currently
attempt to reinitialise an already active associated pinmux
configuration.
Other potential issues include the platform-bus code unconditionally
dropping the device-tree node reference in its device destructor,
reinitialisation of other bus-managed resources such as clocks, and the
recently added DMA-setup in driver core.
Note that for most examples above this is currently not an issue as the
devices are never probed, but this is a problem for the USB bus which
has recently gained device-tree support. This was discovered and
worked-around in a rather ad-hoc fashion by commit dc5878abf49c ("usb:
core: move root hub's device node assignment after it is added to bus")
by not setting the of_node pointer until after the root-hub device has
been registered.
Instead we can allow devices to reuse a device-tree node by setting a
flag in their struct device that can be used by core, bus and driver
code to avoid over-allocating resources.
Note that the helper also grabs an extra reference to the device node,
which specifically balances the unconditional put in the platform-device
destructor.
Signed-off-by: Johan Hovold <johan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
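
The helper ends up as device_set_of_node_from_dev(); a minimal sketch of the
intended use when registering such a child device (the surrounding
registration helper is hypothetical) is:

#include <linux/device.h>
#include <linux/of.h>

/* Hypothetical: register a child device that borrows its parent's OF node. */
static int example_add_child(struct device *parent, struct device *child)
{
        device_initialize(child);
        child->parent = parent;
        /*
         * Share the parent's device-tree node; this also takes an extra
         * node reference and sets of_node_reused so that core, bus and
         * driver code do not claim node-backed resources a second time.
         */
        device_set_of_node_from_dev(child, parent);
        return device_add(child);
}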
 * @of_node_reused: Set if the device-tree node is shared with an ancestor
 *              device.
 * @state_synced: The hardware state of this device has been synced to match
 *              the software state of this device by calling the driver/bus
 *              sync_state() callback.
 * @can_match:  The device has matched with a driver at least once or it is in
 *              a bus (like AMBA) which can't check for matching drivers until
 *              other devices probe successfully.
 * @dma_coherent: this particular device is dma coherent, even if the
 *              architecture supports non-coherent devices.
 * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
 *              streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
 *              and optionally (if the coherent mask is large enough) also
 *              for dma allocations. This flag is managed by the dma ops
 *              instance from ->dma_supported.
 *
 * At the lowest level, every device in a Linux system is represented by an
 * instance of struct device. The device structure contains the information
 * that the device model core needs to model the system. Most subsystems,
 * however, track additional information about the devices they host. As a
 * result, it is rare for devices to be represented by bare device structures;
 * instead, that structure, like kobject structures, is usually embedded within
 * a higher-level representation of the device.
 */

struct device {

device.h: reorganize struct device
struct device is big, around 760 bytes on x86_64. It's not a critical
structure, but it is embedded everywhere, so making it smaller is always
a good thing.
With a recent patch that moved a field from struct device to the private
structure, some benchmarks showed a very odd regression, despite this
structure having nothing to do with those benchmarks. That caused me to
look into the layout of the structure. Using 'pahole', it showed a
number of holes and ways that the structure could be reordered in order
to align some cachelines better, as well as reduce the size of the
overall structure.
Move 'struct kobj' to the start of the structure, to keep that access
in the first cacheline, and try to organize things a bit more compactly
where possible.
By doing these few moves, the result removes at least 8 bytes from
'struct device' on a 64bit system. Given we know there are systems with
at least 30k devices in memory at once, every little byte counts, and
this change could be a savings of 240k of kernel memory for them. On
"normal" systems the overall memory savings would be much less.
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Johan Hovold <johan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

        struct kobject kobj;
        struct device *parent;

        struct device_private *p;

        const char *init_name;  /* initial name of the device */
        const struct device_type *type;

        struct bus_type *bus;   /* type of bus device is on */
        struct device_driver *driver;   /* which driver has allocated this
                                           device */
        void *platform_data;    /* Platform specific data, device
                                   core doesn't touch it */
        void *driver_data;      /* Driver data, set and get with
                                   dev_set_drvdata/dev_get_drvdata */
#ifdef CONFIG_PROVE_LOCKING
        struct mutex lockdep_mutex;
#endif
        struct mutex mutex;     /* mutex to synchronize calls to
                                 * its driver.
                                 */

        struct dev_links_info links;
        struct dev_pm_info power;
        struct dev_pm_domain *pm_domain;

#ifdef CONFIG_ENERGY_MODEL
        struct em_perf_domain *em_pd;
#endif

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
        struct irq_domain *msi_domain;
#endif
#ifdef CONFIG_PINCTRL
        struct dev_pin_info *pins;
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
        raw_spinlock_t msi_lock;
        struct list_head msi_list;
#endif
#ifdef CONFIG_DMA_OPS
        const struct dma_map_ops *dma_ops;
#endif
        u64 *dma_mask;          /* dma mask (if dma'able device) */
        u64 coherent_dma_mask;  /* Like dma_mask, but for
                                   alloc_coherent mappings as
                                   not all hardware supports
                                   64 bit addresses for consistent
                                   allocations such descriptors. */
        u64 bus_dma_limit;      /* upstream dma constraint */
        const struct bus_dma_region *dma_range_map;

        struct device_dma_parameters *dma_parms;

        struct list_head dma_pools;     /* dma pools (if dma'ble) */

#ifdef CONFIG_DMA_DECLARE_COHERENT
        struct dma_coherent_mem *dma_mem;       /* internal for coherent mem
                                                   override */
#endif
#ifdef CONFIG_DMA_CMA
        struct cma *cma_area;   /* contiguous memory area for dma
                                   allocations */
#endif
#ifdef CONFIG_SWIOTLB
        struct io_tlb_mem *dma_io_tlb_mem;
#endif
        /* arch specific additions */
        struct dev_archdata archdata;

        struct device_node *of_node;    /* associated device tree node */
        struct fwnode_handle *fwnode;   /* firmware device node */

#ifdef CONFIG_NUMA
        int numa_node;          /* NUMA node this device is close to */
#endif
        dev_t devt;             /* dev_t, creates the sysfs "dev" */
        u32 id;                 /* device instance */

devres: device resource management
Implement device resource management, in short, devres. A device
driver can allocate arbirary size of devres data which is associated
with a release function. On driver detach, release function is
invoked on the devres data, then, devres data is freed.
devreses are typed by associated release functions. Some devreses are
better represented by single instance of the type while others need
multiple instances sharing the same release function. Both usages are
supported.
devreses can be grouped using devres group such that a device driver
can easily release acquired resources halfway through initialization
or selectively release resources (e.g. resources for port 1 out of 4
ports).
This patch adds devres core including documentation and the following
managed interfaces.
* alloc/free : devm_kzalloc(), devm_kzfree()
* IO region : devm_request_region(), devm_release_region()
* IRQ : devm_request_irq(), devm_free_irq()
* DMA : dmam_alloc_coherent(), dmam_free_coherent(),
dmam_declare_coherent_memory(), dmam_pool_create(),
dmam_pool_destroy()
* PCI : pcim_enable_device(), pcim_pin_device(), pci_is_managed()
* iomap : devm_ioport_map(), devm_ioport_unmap(), devm_ioremap(),
devm_ioremap_nocache(), devm_iounmap(), pcim_iomap_table(),
pcim_iomap(), pcim_iounmap()
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
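
As a hedged illustration of the managed style this enables (the state
structure and probe routine are invented for the example), a probe function
can leave all cleanup to devres:

#include <linux/device.h>
#include <linux/errno.h>

struct example_state {
        void __iomem *regs;
        int irq;
};

static int example_probe(struct device *dev)
{
        struct example_state *st;

        /*
         * devm_kzalloc() ties the allocation to *dev*: it is freed
         * automatically when the driver detaches, so neither the error
         * paths nor a remove() callback need to kfree() it.
         */
        st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        dev_set_drvdata(dev, st);
        return 0;
}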

        spinlock_t devres_lock;
        struct list_head devres_head;

        struct class *class;
        const struct attribute_group **groups;  /* optional groups */

        void (*release)(struct device *dev);
        struct iommu_group *iommu_group;
        struct dev_iommu *iommu;

        enum device_removable removable;

bool offline_disabled:1;
|
|
|
|
bool offline:1;
|
driver core: add helper to reuse a device-tree node
Add a helper function to be used when reusing the device-tree node of
another device.
It is fairly common for drivers to reuse the device-tree node of a
parent (or other ancestor) device when creating class or bus devices
(e.g. gpio chips, i2c adapters, iio chips, spi masters, serdev, phys,
usb root hubs). But reusing a device-tree node may cause problems if the
new device is later probed, as, for example, the driver core would currently
attempt to reinitialise an already active associated pinmux
configuration.
Other potential issues include the platform-bus code unconditionally
dropping the device-tree node reference in its device destructor,
reinitialisation of other bus-managed resources such as clocks, and the
recently added DMA-setup in driver core.
Note that for most examples above this is currently not an issue as the
devices are never probed, but this is a problem for the USB bus which
has recently gained device-tree support. This was discovered and
worked-around in a rather ad-hoc fashion by commit dc5878abf49c ("usb:
core: move root hub's device node assignment after it is added to bus")
by not setting the of_node pointer until after the root-hub device has
been registered.
Instead we can allow devices to reuse a device-tree node by setting a
flag in their struct device that can be used by core, bus and driver
code to avoid over-allocating resources.
Note that the helper also grabs an extra reference to the device node,
which specifically balances the unconditional put in the platform-device
destructor.
Signed-off-by: Johan Hovold <johan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-06-06 23:59:00 +08:00
|
|
|
bool of_node_reused:1;
|
2019-09-05 05:11:23 +08:00
|
|
|
bool state_synced:1;
|
2021-03-03 05:11:30 +08:00
|
|
|
bool can_match:1;
|
2018-08-19 20:53:20 +08:00
|
|
|
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
|
|
|
|
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
|
|
|
|
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
|
|
|
|
bool dma_coherent:1;
|
|
|
|
#endif
|
2020-03-24 01:19:30 +08:00
|
|
|
#ifdef CONFIG_DMA_OPS_BYPASS
|
|
|
|
bool dma_ops_bypass : 1;
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
};
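/*
 * Usage sketch (illustrative; foo_device and foo_release are hypothetical):
 * a driver typically embeds struct device in its own per-device structure
 * and frees that structure from the ->release() callback, which runs once
 * the last reference is dropped.
 *
 *	struct foo_device {
 *		struct device dev;
 *		int id;
 *	};
 *
 *	static void foo_release(struct device *dev)
 *	{
 *		struct foo_device *foo = container_of(dev, struct foo_device, dev);
 *
 *		kfree(foo);
 *	}
 */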
|
|
|
|
|
2020-05-22 03:17:58 +08:00
|
|
|
/**
|
|
|
|
* struct device_link - Device link representation.
|
|
|
|
* @supplier: The device on the supplier end of the link.
|
|
|
|
* @s_node: Hook to the supplier device's list of links to consumers.
|
|
|
|
* @consumer: The device on the consumer end of the link.
|
|
|
|
* @c_node: Hook to the consumer device's list of links to suppliers.
|
|
|
|
* @link_dev: Device used to expose link details in sysfs.
|
|
|
|
* @status: The state of the link (with respect to the presence of drivers).
|
|
|
|
* @flags: Link flags.
|
|
|
|
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
|
|
|
|
* @kref: Count repeated addition of the same link.
|
2021-05-14 20:10:15 +08:00
|
|
|
* @rm_work: Work structure used for removing the link.
|
2020-05-22 03:17:58 +08:00
|
|
|
* @supplier_preactivated: Supplier has been made active before consumer probe.
|
|
|
|
*/
|
|
|
|
struct device_link {
|
|
|
|
struct device *supplier;
|
|
|
|
struct list_head s_node;
|
|
|
|
struct device *consumer;
|
|
|
|
struct list_head c_node;
|
|
|
|
struct device link_dev;
|
|
|
|
enum device_link_state status;
|
|
|
|
u32 flags;
|
|
|
|
refcount_t rpm_active;
|
|
|
|
struct kref kref;
|
2021-05-14 20:10:15 +08:00
|
|
|
struct work_struct rm_work;
|
2020-05-22 03:17:58 +08:00
|
|
|
bool supplier_preactivated; /* Owned by consumer probe. */
|
|
|
|
};
|
|
|
|
|
2012-07-04 00:49:35 +08:00
|
|
|
static inline struct device *kobj_to_dev(struct kobject *kobj)
|
|
|
|
{
|
|
|
|
return container_of(kobj, struct device, kobj);
|
|
|
|
}
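/*
 * Usage sketch (hypothetical foo_bin_read): kobj_to_dev() is handy in
 * callbacks that only receive a kobject, such as a binary sysfs attribute:
 *
 *	static ssize_t foo_bin_read(struct file *filp, struct kobject *kobj,
 *				    struct bin_attribute *attr, char *buf,
 *				    loff_t off, size_t count)
 *	{
 *		struct device *dev = kobj_to_dev(kobj);
 *
 *		dev_dbg(dev, "read of %zu bytes at %lld\n", count, off);
 *		return 0;
 *	}
 */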
|
|
|
|
|
2018-11-30 19:51:52 +08:00
|
|
|
/**
|
|
|
|
* device_iommu_mapped - Returns true when the device DMA is translated
|
|
|
|
* by an IOMMU
|
|
|
|
* @dev: Device to perform the check on
|
|
|
|
*/
|
|
|
|
static inline bool device_iommu_mapped(struct device *dev)
|
|
|
|
{
|
|
|
|
return (dev->iommu_group != NULL);
|
|
|
|
}
|
|
|
|
|
2008-03-20 05:39:13 +08:00
|
|
|
/* Get the wakeup routines, which depend on struct device */
|
|
|
|
#include <linux/pm_wakeup.h>
|
|
|
|
|
2008-07-31 03:29:21 +08:00
|
|
|
static inline const char *dev_name(const struct device *dev)
|
2008-05-02 12:02:41 +08:00
|
|
|
{
|
2010-03-09 14:57:53 +08:00
|
|
|
/* Use the init name until the kobject becomes available */
|
|
|
|
if (dev->init_name)
|
|
|
|
return dev->init_name;
|
|
|
|
|
2009-01-25 22:17:37 +08:00
|
|
|
return kobject_name(&dev->kobj);
|
2008-05-02 12:02:41 +08:00
|
|
|
}
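/*
 * Usage sketch: dev_name() returns the name under which the device appears
 * in sysfs and is the usual choice for log messages:
 *
 *	pr_info("bound %s on bus %s\n", dev_name(dev), dev_bus_name(dev));
 */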
|
|
|
|
|
2021-01-11 01:54:07 +08:00
|
|
|
/**
|
|
|
|
* dev_bus_name - Return a device's bus/class name, if at all possible
|
|
|
|
* @dev: struct device to get the bus/class name of
|
|
|
|
*
|
|
|
|
* Will return the name of the bus/class the device is attached to. If it is
|
|
|
|
* not attached to a bus/class, an empty string will be returned.
|
|
|
|
*/
|
|
|
|
static inline const char *dev_bus_name(const struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
|
|
|
|
}
|
|
|
|
|
2020-06-29 14:50:05 +08:00
|
|
|
__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
|
2008-05-30 08:16:40 +08:00
|
|
|
|
2006-12-07 12:32:33 +08:00
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
static inline int dev_to_node(struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->numa_node;
|
|
|
|
}
|
|
|
|
static inline void set_dev_node(struct device *dev, int node)
|
|
|
|
{
|
|
|
|
dev->numa_node = node;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static inline int dev_to_node(struct device *dev)
|
|
|
|
{
|
2019-03-06 07:42:58 +08:00
|
|
|
return NUMA_NO_NODE;
|
2006-12-07 12:32:33 +08:00
|
|
|
}
|
|
|
|
static inline void set_dev_node(struct device *dev, int node)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif
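/*
 * Usage sketch: dev_to_node() lets a driver place allocations on the NUMA
 * node closest to the device; with CONFIG_NUMA disabled it degrades to
 * NUMA_NO_NODE and the node hint is simply ignored:
 *
 *	void *buf = kzalloc_node(4096, GFP_KERNEL, dev_to_node(dev));
 */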
|
|
|
|
|
2015-07-28 21:46:10 +08:00
|
|
|
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
|
|
|
|
return dev->msi_domain;
|
|
|
|
#else
|
|
|
|
return NULL;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
|
|
|
|
dev->msi_domain = d;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2014-04-14 18:58:53 +08:00
|
|
|
static inline void *dev_get_drvdata(const struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->driver_data;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void dev_set_drvdata(struct device *dev, void *data)
|
|
|
|
{
|
|
|
|
dev->driver_data = data;
|
|
|
|
}
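/*
 * Usage sketch (foo_priv, foo_probe and foo_shutdown are hypothetical):
 * drvdata ties a driver's private state to the device for the lifetime of
 * the binding.
 *
 *	struct foo_priv {
 *		int state;
 *	};
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		dev_set_drvdata(dev, priv);
 *		return 0;
 *	}
 *
 *	static void foo_shutdown(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *		dev_dbg(dev, "shutting down in state %d\n", priv->state);
 *	}
 */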
|
|
|
|
|
2011-08-25 21:33:50 +08:00
|
|
|
static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
|
|
|
|
{
|
|
|
|
return dev ? dev->power.subsys_data : NULL;
|
|
|
|
}
|
|
|
|
|
2009-03-01 21:10:49 +08:00
|
|
|
static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->kobj.uevent_suppress;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void dev_set_uevent_suppress(struct device *dev, int val)
|
|
|
|
{
|
|
|
|
dev->kobj.uevent_suppress = val;
|
|
|
|
}
|
|
|
|
|
2005-09-22 15:47:24 +08:00
|
|
|
static inline int device_is_registered(struct device *dev)
|
|
|
|
{
|
2008-03-14 05:07:03 +08:00
|
|
|
return dev->kobj.state_in_sysfs;
|
2005-09-22 15:47:24 +08:00
|
|
|
}
|
|
|
|
|
PM: Asynchronous suspend and resume of devices
Theoretically, the total time of system sleep transitions (suspend
to RAM, hibernation) can be reduced by running suspend and resume
callbacks of device drivers in parallel with each other. However,
there are dependencies between devices such that we're not allowed
to suspend the parent of a device before suspending the device
itself. Analogously, we're not allowed to resume a device before
resuming its parent.
The most straightforward way to take these dependencies into account
is to start the async threads used for suspending and resuming
devices at the core level, so that async_schedule() is called for
each suspend and resume callback supposed to be executed
asynchronously.
For this purpose, introduce a new device flag, power.async_suspend,
used to mark the devices whose suspend and resume callbacks are to be
executed asynchronously (ie. in parallel with the main suspend/resume
thread and possibly in parallel with each other) and helper function
device_enable_async_suspend() allowing one to set power.async_suspend
for a given device (power.async_suspend is unset by default for all
devices). For each device with the power.async_suspend flag set the
PM core will use async_schedule() to execute its suspend and resume
callbacks.
The async threads started for different devices as a result of
calling async_schedule() are synchronized with each other and with
the main suspend/resume thread with the help of completions, in the
following way:
(1) There is a completion, power.completion, for each device object.
(2) Each device's completion is reset before calling async_schedule()
for the device or, in the case of devices with the
power.async_suspend flags unset, before executing the device's
suspend and resume callbacks.
(3) During suspend, right before running the bus type, device type
and device class suspend callbacks for the device, the PM core
waits for the completions of all the device's children to be
completed.
(4) During resume, right before running the bus type, device type and
device class resume callbacks for the device, the PM core waits
for the completion of the device's parent to be completed.
(5) The PM core completes power.completion for each device right
after the bus type, device type and device class suspend (or
resume) callbacks executed for the device have returned.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2010-01-24 05:23:32 +08:00
|
|
|
static inline void device_enable_async_suspend(struct device *dev)
|
|
|
|
{
|
2011-06-19 02:22:23 +08:00
|
|
|
if (!dev->power.is_prepared)
|
2010-01-24 05:23:32 +08:00
|
|
|
dev->power.async_suspend = true;
|
|
|
|
}
|
|
|
|
|
2010-01-24 05:25:23 +08:00
|
|
|
static inline void device_disable_async_suspend(struct device *dev)
|
|
|
|
{
|
2011-06-19 02:22:23 +08:00
|
|
|
if (!dev->power.is_prepared)
|
2010-01-24 05:25:23 +08:00
|
|
|
dev->power.async_suspend = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool device_async_suspend_enabled(struct device *dev)
|
|
|
|
{
|
|
|
|
return !!dev->power.async_suspend;
|
|
|
|
}
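/*
 * Usage sketch: a driver whose suspend/resume callbacks have no ordering
 * requirements beyond the parent/child dependencies tracked by the PM core
 * can opt in to asynchronous suspend from its probe path:
 *
 *	device_enable_async_suspend(dev);
 */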
|
|
|
|
|
2019-02-15 02:29:10 +08:00
|
|
|
static inline bool device_pm_not_required(struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->power.no_pm;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void device_set_pm_not_required(struct device *dev)
|
|
|
|
{
|
|
|
|
dev->power.no_pm = true;
|
|
|
|
}
|
|
|
|
|
2012-08-13 20:00:25 +08:00
|
|
|
static inline void dev_pm_syscore_device(struct device *dev, bool val)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_PM_SLEEP
|
|
|
|
dev->power.syscore = val;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
PM / core: Add NEVER_SKIP and SMART_PREPARE driver flags
The motivation for this change is to provide a way to work around
a problem with the direct-complete mechanism used for avoiding
system suspend/resume handling for devices in runtime suspend.
The problem is that some middle layer code (the PCI bus type and
the ACPI PM domain in particular) returns positive values from its
system suspend ->prepare callbacks regardless of whether the driver's
->prepare returns a positive value or 0, which effectively prevents
drivers from being able to control the direct-complete feature.
Some drivers need that control, however, and the PCI bus type has
grown its own flag to deal with this issue, but since it is not
limited to PCI, it is better to address it by adding driver flags at
the core level.
To that end, add a driver_flags field to struct dev_pm_info for flags
that can be set by device drivers at probe time to inform the PM
core and/or bus types, PM domains and so on about the capabilities and/or
preferences of device drivers. Also add two static inline helpers
for setting that field and testing it against a given set of flags
and make the driver core clear it automatically on driver remove
and probe failures.
Define and document two PM driver flags related to the direct-
complete feature: NEVER_SKIP and SMART_PREPARE that can be used,
respectively, to indicate to the PM core that the direct-complete
mechanism should never be used for the device and to inform the
middle layer code (bus types, PM domains etc) that it can only
request the PM core to use the direct-complete mechanism for
the device (by returning a positive value from its ->prepare
callback) if it also has been requested by the driver.
While at it, make the core check pm_runtime_suspended() when
setting power.direct_complete so that it doesn't need to be
checked by ->prepare callbacks.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
2017-10-25 20:12:29 +08:00
|
|
|
static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
|
|
|
|
{
|
|
|
|
dev->power.driver_flags = flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
|
|
|
|
{
|
|
|
|
return !!(dev->power.driver_flags & flags);
|
|
|
|
}
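/*
 * Usage sketch (assuming the DPM_FLAG_SMART_PREPARE flag from <linux/pm.h>
 * described in the commit text above): the driver sets the flag at probe
 * time and middle-layer code tests it later.
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_PREPARE);
 *
 *	if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
 *		dev_dbg(dev, "driver controls direct-complete itself\n");
 */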
|
|
|
|
|
2010-02-18 02:57:05 +08:00
|
|
|
static inline void device_lock(struct device *dev)
|
|
|
|
{
|
2010-01-30 04:39:02 +08:00
|
|
|
mutex_lock(&dev->mutex);
|
2010-02-18 02:57:05 +08:00
|
|
|
}
|
|
|
|
|
2016-01-21 22:18:47 +08:00
|
|
|
static inline int device_lock_interruptible(struct device *dev)
|
|
|
|
{
|
|
|
|
return mutex_lock_interruptible(&dev->mutex);
|
|
|
|
}
|
|
|
|
|
2010-02-18 02:57:05 +08:00
|
|
|
static inline int device_trylock(struct device *dev)
|
|
|
|
{
|
2010-01-30 04:39:02 +08:00
|
|
|
return mutex_trylock(&dev->mutex);
|
2010-02-18 02:57:05 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void device_unlock(struct device *dev)
|
|
|
|
{
|
2010-01-30 04:39:02 +08:00
|
|
|
mutex_unlock(&dev->mutex);
|
2010-02-18 02:57:05 +08:00
|
|
|
}
|
|
|
|
|
2014-12-04 05:40:27 +08:00
|
|
|
static inline void device_lock_assert(struct device *dev)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&dev->mutex);
|
|
|
|
}
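/*
 * Usage sketch: device_lock() serialises against driver binding and
 * unbinding, so dev->driver can be inspected safely while it is held:
 *
 *	device_lock(dev);
 *	if (dev->driver)
 *		dev_dbg(dev, "bound to %s\n", dev->driver->name);
 *	device_unlock(dev);
 */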
|
|
|
|
|
2015-02-18 09:03:41 +08:00
|
|
|
static inline struct device_node *dev_of_node(struct device *dev)
|
|
|
|
{
|
2019-04-13 02:31:45 +08:00
|
|
|
if (!IS_ENABLED(CONFIG_OF) || !dev)
|
2015-02-18 09:03:41 +08:00
|
|
|
return NULL;
|
|
|
|
return dev->of_node;
|
|
|
|
}
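/*
 * Usage sketch (the "foo,threshold" property is hypothetical; needs
 * <linux/of.h>): dev_of_node() is safe to call with CONFIG_OF disabled,
 * so firmware lookups can be written without #ifdefs:
 *
 *	struct device_node *np = dev_of_node(dev);
 *	u32 threshold;
 *
 *	if (np && !of_property_read_u32(np, "foo,threshold", &threshold))
 *		dev_dbg(dev, "threshold %u\n", threshold);
 */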
|
|
|
|
|
2020-02-21 16:05:09 +08:00
|
|
|
static inline bool dev_has_sync_state(struct device *dev)
|
|
|
|
{
|
|
|
|
if (!dev)
|
|
|
|
return false;
|
|
|
|
if (dev->driver && dev->driver->sync_state)
|
|
|
|
return true;
|
|
|
|
if (dev->bus && dev->bus->sync_state)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-05-25 01:18:11 +08:00
|
|
|
static inline void dev_set_removable(struct device *dev,
|
|
|
|
enum device_removable removable)
|
|
|
|
{
|
|
|
|
dev->removable = removable;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool dev_is_removable(struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->removable == DEVICE_REMOVABLE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool dev_removable_is_valid(struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* High level routines for use by the bus drivers
|
|
|
|
*/
|
2020-06-29 14:50:05 +08:00
|
|
|
int __must_check device_register(struct device *dev);
|
|
|
|
void device_unregister(struct device *dev);
|
|
|
|
void device_initialize(struct device *dev);
|
|
|
|
int __must_check device_add(struct device *dev);
|
|
|
|
void device_del(struct device *dev);
|
|
|
|
int device_for_each_child(struct device *dev, void *data,
|
|
|
|
int (*fn)(struct device *dev, void *data));
|
|
|
|
int device_for_each_child_reverse(struct device *dev, void *data,
|
|
|
|
int (*fn)(struct device *dev, void *data));
|
|
|
|
struct device *device_find_child(struct device *dev, void *data,
|
|
|
|
int (*match)(struct device *dev, void *data));
|
|
|
|
struct device *device_find_child_by_name(struct device *parent,
|
|
|
|
const char *name);
|
|
|
|
int device_rename(struct device *dev, const char *new_name);
|
|
|
|
int device_move(struct device *dev, struct device *new_parent,
|
|
|
|
enum dpm_order dpm_order);
|
|
|
|
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
|
|
|
|
const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
|
|
|
|
kgid_t *gid, const char **tmp);
|
2020-08-06 04:02:45 +08:00
|
|
|
int device_is_dependent(struct device *dev, void *target);
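/*
 * Usage sketch (reusing the hypothetical foo_device/foo_release from the
 * sketch after struct device): the two-step device_initialize() plus
 * device_add() form; after device_initialize() any failure must be
 * unwound with put_device(), never by freeing the structure directly.
 *
 *	static int foo_add(struct foo_device *foo, struct device *parent)
 *	{
 *		int err;
 *
 *		device_initialize(&foo->dev);
 *		foo->dev.parent = parent;
 *		foo->dev.release = foo_release;
 *
 *		err = dev_set_name(&foo->dev, "foo%d", foo->id);
 *		if (!err)
 *			err = device_add(&foo->dev);
 *		if (err)
 *			put_device(&foo->dev);
 *		return err;
 *	}
 */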
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-05-03 04:15:29 +08:00
|
|
|
static inline bool device_supports_offline(struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->bus && dev->bus->offline && dev->bus->online;
|
|
|
|
}
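/*
 * Usage sketch: attempting a graceful offline under the hotplug lock
 * before triggering physical removal:
 *
 *	int ret = -EOPNOTSUPP;
 *
 *	lock_device_hotplug();
 *	if (device_supports_offline(dev) && !dev->offline_disabled)
 *		ret = device_offline(dev);
 *	unlock_device_hotplug();
 */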
|
|
|
|
|
2020-06-29 14:50:05 +08:00
|
|
|
void lock_device_hotplug(void);
|
|
|
|
void unlock_device_hotplug(void);
|
|
|
|
int lock_device_hotplug_sysfs(void);
|
|
|
|
int device_offline(struct device *dev);
|
|
|
|
int device_online(struct device *dev);
|
|
|
|
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
|
|
|
|
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
|
2017-06-06 23:59:00 +08:00
|
|
|
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
|
2021-06-17 20:29:04 +08:00
|
|
|
void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
|
2015-04-04 05:23:37 +08:00
|
|
|
|
2017-01-18 21:04:39 +08:00
|
|
|
static inline int dev_num_vf(struct device *dev)
|
|
|
|
{
|
|
|
|
if (dev->bus && dev->bus->num_vf)
|
|
|
|
return dev->bus->num_vf(dev);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-12-15 20:58:26 +08:00
|
|
|
/*
|
|
|
|
* Root device objects for grouping under /sys/devices
|
|
|
|
*/
|
2020-06-29 14:50:05 +08:00
|
|
|
struct device *__root_device_register(const char *name, struct module *owner);
|
2011-05-27 21:02:11 +08:00
|
|
|
|
2014-01-10 21:57:31 +08:00
|
|
|
/* This is a macro to avoid include problems with THIS_MODULE */
|
2011-05-27 21:02:11 +08:00
|
|
|
#define root_device_register(name) \
|
|
|
|
__root_device_register(name, THIS_MODULE)
|
|
|
|
|
2020-06-29 14:50:05 +08:00
|
|
|
void root_device_unregister(struct device *root);
|
2008-12-15 20:58:26 +08:00
|
|
|
|
2009-07-17 22:06:08 +08:00
|
|
|
static inline void *dev_get_platdata(const struct device *dev)
|
|
|
|
{
|
|
|
|
return dev->platform_data;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Manual binding of a device to a driver. See drivers/base/bus.c
|
|
|
|
* for information on use.
|
|
|
|
*/
|
2021-06-17 22:22:13 +08:00
|
|
|
int __must_check device_driver_attach(struct device_driver *drv,
|
|
|
|
struct device *dev);
|
2020-06-29 14:50:05 +08:00
|
|
|
int __must_check device_bind_driver(struct device *dev);
|
|
|
|
void device_release_driver(struct device *dev);
|
|
|
|
int __must_check device_attach(struct device *dev);
|
|
|
|
int __must_check driver_attach(struct device_driver *drv);
|
|
|
|
void device_initial_probe(struct device *dev);
|
|
|
|
int __must_check device_reprobe(struct device *dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-06-29 14:50:05 +08:00
|
|
|
bool device_is_bound(struct device *dev);
|
2016-01-07 23:46:12 +08:00
|
|
|
|
2006-06-15 03:14:34 +08:00
|
|
|
/*
|
|
|
|
* Easy functions for dynamically creating devices
|
|
|
|
*/
|
2020-06-29 14:50:05 +08:00
|
|
|
__printf(5, 6) struct device *
|
|
|
|
device_create(struct class *cls, struct device *parent, dev_t devt,
|
|
|
|
void *drvdata, const char *fmt, ...);
|
|
|
|
__printf(6, 7) struct device *
|
|
|
|
device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
|
|
|
|
void *drvdata, const struct attribute_group **groups,
|
|
|
|
const char *fmt, ...);
|
|
|
|
void device_destroy(struct class *cls, dev_t devt);
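/*
 * Usage sketch (foo_class, foo_major and priv are hypothetical; MKDEV()
 * comes from <linux/kdev_t.h>): create a class device that devtmpfs/udev
 * will expose as /dev/foo0, and destroy it again on teardown.
 *
 *	struct device *d;
 *
 *	d = device_create(foo_class, NULL, MKDEV(foo_major, 0), priv, "foo%d", 0);
 *	if (IS_ERR(d))
 *		return PTR_ERR(d);
 *
 *	device_destroy(foo_class, MKDEV(foo_major, 0));
 */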
|
|
|
|
|
|
|
|
int __must_check device_add_groups(struct device *dev,
|
|
|
|
const struct attribute_group **groups);
|
|
|
|
void device_remove_groups(struct device *dev,
|
|
|
|
const struct attribute_group **groups);
|
2017-07-20 08:24:31 +08:00
|
|
|
|
2017-07-20 08:24:32 +08:00
|
|
|
static inline int __must_check device_add_group(struct device *dev,
|
|
|
|
const struct attribute_group *grp)
|
|
|
|
{
|
|
|
|
const struct attribute_group *groups[] = { grp, NULL };
|
|
|
|
|
|
|
|
return device_add_groups(dev, groups);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void device_remove_group(struct device *dev,
|
|
|
|
const struct attribute_group *grp)
|
|
|
|
{
|
|
|
|
const struct attribute_group *groups[] = { grp, NULL };
|
|
|
|
|
|
|
|
return device_remove_groups(dev, groups);
|
|
|
|
}
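/*
 * Usage sketch (all foo_* names hypothetical): a read-only attribute
 * bundled into a group and attached with device_add_group().
 *
 *	static ssize_t foo_state_show(struct device *dev,
 *				      struct device_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", 1);
 *	}
 *	static DEVICE_ATTR_RO(foo_state);
 *
 *	static struct attribute *foo_attrs[] = {
 *		&dev_attr_foo_state.attr,
 *		NULL
 *	};
 *	static const struct attribute_group foo_group = {
 *		.attrs = foo_attrs,
 *	};
 *
 *	err = device_add_group(dev, &foo_group);
 */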
|
|
|
|
|
2020-06-29 14:50:05 +08:00
|
|
|
int __must_check devm_device_add_groups(struct device *dev,
|
2017-07-20 08:24:33 +08:00
|
|
|
const struct attribute_group **groups);
|
2020-06-29 14:50:05 +08:00
|
|
|
void devm_device_remove_groups(struct device *dev,
|
|
|
|
const struct attribute_group **groups);
|
|
|
|
int __must_check devm_device_add_group(struct device *dev,
|
|
|
|
const struct attribute_group *grp);
|
|
|
|
void devm_device_remove_group(struct device *dev,
|
|
|
|
const struct attribute_group *grp);
|
2017-07-20 08:24:33 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Platform "fixup" functions - allow the platform to have its say
|
|
|
|
* about devices and actions that the general device layer doesn't
|
|
|
|
* know about.
|
|
|
|
*/
|
|
|
|
/* Notify platform of device discovery */
|
2008-01-25 13:04:46 +08:00
|
|
|
extern int (*platform_notify)(struct device *dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-25 13:04:46 +08:00
|
|
|
extern int (*platform_notify_remove)(struct device *dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
|
2011-05-05 07:55:36 +08:00
|
|
|
/*
|
2005-04-17 06:20:36 +08:00
|
|
|
* get_device - atomically increment the reference count for the device.
|
|
|
|
*
|
|
|
|
*/
|
2020-06-29 14:50:05 +08:00
|
|
|
struct device *get_device(struct device *dev);
|
|
|
|
void put_device(struct device *dev);
|
|
|
|
bool kill_device(struct device *dev);
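/*
 * Usage sketch (foo is the hypothetical structure from earlier sketches):
 * holding a device beyond the current call chain requires an explicit
 * reference; get_device() tolerates and returns NULL.
 *
 *	struct device *dev = get_device(&foo->dev);
 *
 *	if (dev) {
 *		dev_dbg(dev, "holding a reference\n");
 *		put_device(dev);
 *	}
 */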
|
2005-04-17 06:20:36 +08:00
|
|
|
|
Driver Core: devtmpfs - kernel-maintained tmpfs-based /dev
Devtmpfs lets the kernel create a tmpfs instance called devtmpfs
very early at kernel initialization, before any driver-core device
is registered. Every device with a major/minor will provide a
device node in devtmpfs.
Devtmpfs can be changed and altered by userspace at any time,
and in any way needed - just like today's udev-mounted tmpfs.
Unmodified udev versions will run just fine on top of it, and will
recognize an already existing kernel-created device node and use it.
The default node permissions are root:root 0600. Proper permissions
and user/group ownership, meaningful symlinks, all other policy still
needs to be applied by userspace.
If a node is created by devtmpfs, devtmpfs will remove the device node
when the device goes away. If the device node was created by
userspace, or the devtmpfs created node was replaced by userspace, it
will no longer be removed by devtmpfs.
If it is requested to auto-mount it, it makes init=/bin/sh work
without any further userspace support. /dev will be fully populated
and dynamic, and always reflect the current device state of the kernel.
With the commonly used dynamic device numbers, it solves the problem
where static devices nodes may point to the wrong devices.
It is intended to make the initial bootup logic simpler and more robust,
by de-coupling the creation of the initial environment, to reliably run
userspace processes, from a complex userspace bootstrap logic to provide
a working /dev.
Signed-off-by: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Jan Blunck <jblunck@suse.de>
Tested-By: Harald Hoyer <harald@redhat.com>
Tested-By: Scott James Remnant <scott@ubuntu.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2009-04-30 21:23:42 +08:00
|
|
|
#ifdef CONFIG_DEVTMPFS
|
2020-06-29 14:50:05 +08:00
|
|
|
int devtmpfs_mount(void);
|
2009-04-30 21:23:42 +08:00
|
|
|
#else
|
2018-10-24 04:10:35 +08:00
|
|
|
static inline int devtmpfs_mount(void) { return 0; }
|
2009-04-30 21:23:42 +08:00
|
|
|
#endif
|
|
|
|
|
2006-03-22 07:58:53 +08:00
|
|
|
/* drivers/base/power/shutdown.c */
|
2020-06-29 14:50:05 +08:00
|
|
|
void device_shutdown(void);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* debugging and troubleshooting/diagnostic helpers. */
|
2020-06-29 14:50:05 +08:00
|
|
|
const char *dev_driver_string(const struct device *dev);
|
2010-06-27 09:02:34 +08:00
|
|
|
|
driver core: Functional dependencies tracking support
Currently, there is a problem with taking functional dependencies
between devices into account.
What I mean by a "functional dependency" is when the driver of device
B needs device A to be functional and (generally) its driver to be
present in order to work properly. This has certain consequences
for power management (suspend/resume and runtime PM ordering) and
shutdown ordering of these devices. In general, it also implies that
the driver of A needs to be working for B to be probed successfully
and it cannot be unbound from the device before the B's driver.
Support for representing those functional dependencies between
devices is added here to allow the driver core to track them and act
on them in certain cases where applicable.
The argument for doing that in the driver core is that there are
quite a few distinct use cases involving device dependencies, they
are relatively hard to get right in a driver (if one wants to
address all of them properly) and it only gets worse if multiplied
by the number of drivers potentially needing to do it. Moreover, at
least one case (asynchronous system suspend/resume) cannot be handled
in a single driver at all, because it requires the driver of A to
wait for B to suspend (during system suspend) and the driver of B to
wait for A to resume (during system resume).
For this reason, represent dependencies between devices as "links",
with the help of struct device_link objects each containing pointers
to the "linked" devices, a list node for each of them, status
information, flags, and an RCU head for synchronization.
Also add two new list heads, representing the lists of links to the
devices that depend on the given one (consumers) and to the devices
depended on by it (suppliers), and a "driver presence status" field
(needed for figuring out initial states of device links) to struct
device.
The entire data structure consisting of all of the lists of link
objects for all devices is protected by a mutex (for link object
addition/removal and for list walks during device driver probing
and removal) and by SRCU (for list walking in other cases that will
be introduced by subsequent change sets). If CONFIG_SRCU is not
selected, however, an rwsem is used for protecting the entire data
structure.
In addition, each link object has an internal status field whose
value reflects whether or not drivers are bound to the devices
pointed to by the link or probing/removal of their drivers is in
progress etc. That field is only modified under the device links
mutex, but it may be read outside of it in some cases (introduced by
subsequent change sets), so modifications of it are annotated with
WRITE_ONCE().
New links are added by calling device_link_add() which takes three
arguments: pointers to the devices in question and flags. In
particular, if DL_FLAG_STATELESS is set in the flags, the link status
is not to be taken into account for this link and the driver core
will not manage it. In turn, if DL_FLAG_AUTOREMOVE is set in the
flags, the driver core will remove the link automatically when the
consumer device driver unbinds from it.
One of the actions carried out by device_link_add() is to reorder
the lists used for device shutdown and system suspend/resume to
put the consumer device along with all of its children and all of
its consumers (and so on, recursively) to the ends of those lists
in order to ensure the right ordering between all of the supplier
and consumer devices.
For this reason, it is not possible to create a link between two
devices if the would-be supplier device already depends on the
would-be consumer device as either a direct descendant of it or a
consumer of one of its direct descendants or one of its consumers
and so on.
There are two types of link objects, persistent and non-persistent.
The persistent ones stay around until one of the target devices is
deleted, while the non-persistent ones are removed automatically when
the consumer driver unbinds from its device (ie. they are assumed to
be valid only as long as the consumer device has a driver bound to
it). Persistent links are created by default and non-persistent
links are created when the DL_FLAG_AUTOREMOVE flag is passed
to device_link_add().
Both persistent and non-persistent device links can be deleted
with an explicit call to device_link_del().
Links created without the DL_FLAG_STATELESS flag set are managed
by the driver core using a simple state machine. There are 5 states
each link can be in: DORMANT (unused), AVAILABLE (the supplier driver
is present and functional), CONSUMER_PROBE (the consumer driver is
probing), ACTIVE (both supplier and consumer drivers are present and
functional), and SUPPLIER_UNBIND (the supplier driver is unbinding).
The driver core updates the link state automatically depending on
what happens to the linked devices and for each link state specific
actions are taken in addition to that.
For example, if the supplier driver unbinds from its device, the
driver core will also unbind the drivers of all of its consumers
automatically under the assumption that they cannot function
properly without the supplier. Analogously, the driver core will
only allow the consumer driver to bind to its device if the
supplier driver is present and functional (ie. the link is in
the AVAILABLE state). If that's not the case, it will rely on
the existing deferred probing mechanism to wait for the supplier
driver to become available.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-10-31 00:32:16 +08:00
|
|
|
/* Device links interface. */
|
|
|
|
struct device_link *device_link_add(struct device *consumer,
|
|
|
|
struct device *supplier, u32 flags);
|
|
|
|
void device_link_del(struct device_link *link);
|
2018-07-05 22:25:56 +08:00
|
|
|
void device_link_remove(void *consumer, struct device *supplier);
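/*
 * Usage sketch: a consumer recording a stateless dependency on its
 * supplier (DL_FLAG_STATELESS is described in the commit text above);
 * with that flag the caller is responsible for dropping the link again.
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
 *	if (!link)
 *		return -EINVAL;
 *
 *	device_link_del(link);
 */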
|
2019-09-05 05:11:23 +08:00
|
|
|
void device_links_supplier_sync_state_pause(void);
|
|
|
|
void device_links_supplier_sync_state_resume(void);
|
2010-06-27 09:02:34 +08:00
|
|
|
|
2020-07-13 22:43:21 +08:00
|
|
|
extern __printf(3, 4)
|
2020-09-09 15:37:40 +08:00
|
|
|
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
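/*
 * Usage sketch (assumes <linux/clk.h> for devm_clk_get()): dev_err_probe()
 * returns the error it is given and logs it appropriately (deferred probes
 * are handled quietly), which keeps probe error paths to a single line:
 *
 *	clk = devm_clk_get(dev, NULL);
 *	if (IS_ERR(clk))
 *		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
 */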
|
2020-07-13 22:43:21 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Create alias, so I can be autoloaded. */
|
|
|
|
#define MODULE_ALIAS_CHARDEV(major,minor) \
|
|
|
|
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
|
|
|
|
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
|
|
|
|
MODULE_ALIAS("char-major-" __stringify(major) "-*")
|
2010-09-08 22:54:17 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_SYSFS_DEPRECATED
|
|
|
|
extern long sysfs_deprecated;
|
|
|
|
#else
|
|
|
|
#define sysfs_deprecated 0
|
|
|
|
#endif
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* _DEVICE_H_ */
|