/*
 * Copyright (c) 2016 Intel Corporation
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
#include <linux/dma-resv.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

int drm_modeset_register_all(struct drm_device *dev)
{
	int ret;

	ret = drm_plane_register_all(dev);
	if (ret)
		goto err_plane;

	ret = drm_crtc_register_all(dev);
	if (ret)
		goto err_crtc;

	ret = drm_encoder_register_all(dev);
	if (ret)
		goto err_encoder;

	ret = drm_connector_register_all(dev);
	if (ret)
		goto err_connector;

	drm_debugfs_late_register(dev);

	return 0;

err_connector:
	drm_encoder_unregister_all(dev);
err_encoder:
	drm_crtc_unregister_all(dev);
err_crtc:
	drm_plane_unregister_all(dev);
err_plane:
	return ret;
}

void drm_modeset_unregister_all(struct drm_device *dev)
{
	drm_connector_unregister_all(dev);
	drm_encoder_unregister_all(dev);
	drm_crtc_unregister_all(dev);
	drm_plane_unregister_all(dev);
}

/**
 * drm_mode_getresources - get graphics configuration
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Construct a set of configuration description structures and return
 * them to the user, including CRTC, connector and framebuffer configuration.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_getresources(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_mode_card_res *card_res = data;
	struct drm_framebuffer *fb;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	int count, ret = 0;
	uint32_t __user *fb_id;
	uint32_t __user *crtc_id;
	uint32_t __user *connector_id;
	uint32_t __user *encoder_id;
	struct drm_connector_list_iter conn_iter;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EOPNOTSUPP;

	mutex_lock(&file_priv->fbs_lock);
	count = 0;
	fb_id = u64_to_user_ptr(card_res->fb_id_ptr);
	list_for_each_entry(fb, &file_priv->fbs, filp_head) {
		if (count < card_res->count_fbs &&
		    put_user(fb->base.id, fb_id + count)) {
			mutex_unlock(&file_priv->fbs_lock);
			return -EFAULT;
		}
		count++;
	}
	card_res->count_fbs = count;
	mutex_unlock(&file_priv->fbs_lock);

	card_res->max_height = dev->mode_config.max_height;
	card_res->min_height = dev->mode_config.min_height;
	card_res->max_width = dev->mode_config.max_width;
	card_res->min_width = dev->mode_config.min_width;

	count = 0;
	crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr);
	drm_for_each_crtc(crtc, dev) {
		if (drm_lease_held(file_priv, crtc->base.id)) {
			if (count < card_res->count_crtcs &&
			    put_user(crtc->base.id, crtc_id + count))
				return -EFAULT;
			count++;
		}
	}
	card_res->count_crtcs = count;

	count = 0;
	encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr);
	drm_for_each_encoder(encoder, dev) {
		if (count < card_res->count_encoders &&
		    put_user(encoder->base.id, encoder_id + count))
			return -EFAULT;
		count++;
	}
	card_res->count_encoders = count;

	drm_connector_list_iter_begin(dev, &conn_iter);
	count = 0;
	connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* only expose writeback connectors if userspace understands them */
		if (!file_priv->writeback_connectors &&
		    (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
			continue;

		if (drm_lease_held(file_priv, connector->base.id)) {
			if (count < card_res->count_connectors &&
			    put_user(connector->base.id, connector_id + count)) {
				drm_connector_list_iter_end(&conn_iter);
				return -EFAULT;
			}
			count++;
		}
	}
	card_res->count_connectors = count;
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
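
/*
 * Usage note (illustrative, not part of the kernel build): userspace normally
 * reaches this ioctl through libdrm's drmModeGetResources() wrapper rather
 * than issuing DRM_IOCTL_MODE_GETRESOURCES by hand. A minimal sketch, with the
 * device path purely as an example:
 *
 *	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
 *	drmModeRes *res = drmModeGetResources(fd);
 *
 *	if (res) {
 *		for (int i = 0; i < res->count_connectors; i++)
 *			printf("connector id %u\n", res->connectors[i]);
 *		drmModeFreeResources(res);
 *	}
 *	close(fd);
 *
 * The wrapper does the usual two-pass dance: a first call with zero-sized
 * arrays to learn the counts filled in above, then a second call with buffers
 * large enough to receive the object IDs.
 */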

/**
 * drm_mode_config_reset - call ->reset callbacks
 * @dev: drm device
 *
 * This function calls all the crtc's, encoder's and connector's ->reset
 * callbacks. Drivers can use this in e.g. their driver load or resume code to
 * reset hardware and software state.
 */
void drm_mode_config_reset(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_for_each_plane(plane, dev)
		if (plane->funcs->reset)
			plane->funcs->reset(plane);

	drm_for_each_crtc(crtc, dev)
		if (crtc->funcs->reset)
			crtc->funcs->reset(crtc);

	drm_for_each_encoder(encoder, dev)
		if (encoder->funcs && encoder->funcs->reset)
			encoder->funcs->reset(encoder);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		if (connector->funcs->reset)
			connector->funcs->reset(connector);
	drm_connector_list_iter_end(&conn_iter);
}
EXPORT_SYMBOL(drm_mode_config_reset);
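
/*
 * Usage note (illustrative, driver names hypothetical): drivers typically call
 * drm_mode_config_reset() once the hardware is in a known state, e.g. at the
 * end of their modeset init or in a resume path, so that the software state
 * tracked by the ->reset() hooks matches the hardware again:
 *
 *	static int foo_drm_resume(struct device *dev)
 *	{
 *		struct drm_device *drm = dev_get_drvdata(dev);
 *
 *		foo_hw_init(drm);		// reprogram the hardware
 *		drm_mode_config_reset(drm);	// resync software object state
 *		...
 *		return 0;
 *	}
 */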

/*
 * Global properties
 */
static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
	{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
	{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
	{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
};

static int drm_mode_create_standard_properties(struct drm_device *dev)
{
	struct drm_property *prop;
	int ret;

	ret = drm_connector_create_standard_properties(dev);
	if (ret)
		return ret;

	prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
					"type", drm_plane_type_enum_list,
					ARRAY_SIZE(drm_plane_type_enum_list));
	if (!prop)
		return -ENOMEM;
	dev->mode_config.plane_type_property = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
			"SRC_X", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_src_x = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
			"SRC_Y", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_src_y = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
			"SRC_W", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_src_w = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
			"SRC_H", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_src_h = prop;

	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
			"CRTC_X", INT_MIN, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_x = prop;

	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
			"CRTC_Y", INT_MIN, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_y = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
			"CRTC_W", 0, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_w = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
			"CRTC_H", 0, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_h = prop;

	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
			"FB_ID", DRM_MODE_OBJECT_FB);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_fb_id = prop;

	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
			"IN_FENCE_FD", -1, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_in_fence_fd = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
			"OUT_FENCE_PTR", 0, U64_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_out_fence_ptr = prop;

	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
			"CRTC_ID", DRM_MODE_OBJECT_CRTC);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_id = prop;

	prop = drm_property_create(dev,
			DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
			"FB_DAMAGE_CLIPS", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_fb_damage_clips = prop;

	prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
			"ACTIVE");
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_active = prop;

	prop = drm_property_create(dev,
			DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
			"MODE_ID", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_mode_id = prop;

	prop = drm_property_create_bool(dev, 0,
			"VRR_ENABLED");
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_vrr_enabled = prop;

	prop = drm_property_create(dev,
			DRM_MODE_PROP_BLOB,
			"DEGAMMA_LUT", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.degamma_lut_property = prop;

	prop = drm_property_create_range(dev,
			DRM_MODE_PROP_IMMUTABLE,
			"DEGAMMA_LUT_SIZE", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.degamma_lut_size_property = prop;

	prop = drm_property_create(dev,
			DRM_MODE_PROP_BLOB,
			"CTM", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.ctm_property = prop;

	prop = drm_property_create(dev,
			DRM_MODE_PROP_BLOB,
			"GAMMA_LUT", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.gamma_lut_property = prop;

	prop = drm_property_create_range(dev,
			DRM_MODE_PROP_IMMUTABLE,
			"GAMMA_LUT_SIZE", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.gamma_lut_size_property = prop;

	prop = drm_property_create(dev,
			DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
			"IN_FORMATS", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.modifiers_property = prop;

	return 0;
}

static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
{
	drm_mode_config_cleanup(dev);
}

/**
 * drmm_mode_config_init - managed DRM mode_configuration structure
 *	initialization
 * @dev: DRM device
 *
 * Initialize @dev's mode_config structure, used for tracking the graphics
 * configuration of @dev.
 *
 * Since this initializes the modeset locks, no locking is possible here. That
 * is no problem, since this should happen single-threaded at init time. It is
 * the driver's job to ensure this guarantee.
 *
 * Cleanup is automatically handled through registering drm_mode_config_cleanup
 * with drmm_add_action().
 *
 * Returns: 0 on success, negative error value on failure.
 */
int drmm_mode_config_init(struct drm_device *dev)
{
	int ret;

	mutex_init(&dev->mode_config.mutex);
	drm_modeset_lock_init(&dev->mode_config.connection_mutex);
	mutex_init(&dev->mode_config.idr_mutex);
	mutex_init(&dev->mode_config.fb_lock);
	mutex_init(&dev->mode_config.blob_lock);
	INIT_LIST_HEAD(&dev->mode_config.fb_list);
	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
	INIT_LIST_HEAD(&dev->mode_config.connector_list);
	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
	INIT_LIST_HEAD(&dev->mode_config.property_list);
	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
	INIT_LIST_HEAD(&dev->mode_config.plane_list);
	INIT_LIST_HEAD(&dev->mode_config.privobj_list);
	idr_init_base(&dev->mode_config.object_idr, 1);
	idr_init_base(&dev->mode_config.tile_idr, 1);
	ida_init(&dev->mode_config.connector_ida);
	spin_lock_init(&dev->mode_config.connector_list_lock);

	init_llist_head(&dev->mode_config.connector_free_list);
	INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);

	ret = drm_mode_create_standard_properties(dev);
	if (ret) {
		drm_mode_config_cleanup(dev);
		return ret;
	}

	/* Just to be sure */
	dev->mode_config.num_fb = 0;
	dev->mode_config.num_connector = 0;
	dev->mode_config.num_crtc = 0;
	dev->mode_config.num_encoder = 0;
	dev->mode_config.num_total_plane = 0;

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		struct drm_modeset_acquire_ctx modeset_ctx;
		struct ww_acquire_ctx resv_ctx;
		struct dma_resv resv;
		int ret;

		dma_resv_init(&resv);

		drm_modeset_acquire_init(&modeset_ctx, 0);
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &modeset_ctx);
		if (ret == -EDEADLK)
			ret = drm_modeset_backoff(&modeset_ctx);

		ww_acquire_init(&resv_ctx, &reservation_ww_class);
		ret = dma_resv_lock(&resv, &resv_ctx);
		if (ret == -EDEADLK)
			dma_resv_lock_slow(&resv, &resv_ctx);

		dma_resv_unlock(&resv);
		ww_acquire_fini(&resv_ctx);

		drm_modeset_drop_locks(&modeset_ctx);
		drm_modeset_acquire_fini(&modeset_ctx);
		dma_resv_fini(&resv);
	}

	return drmm_add_action_or_reset(dev, drm_mode_config_init_release,
					NULL);
}
EXPORT_SYMBOL(drmm_mode_config_init);
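
/*
 * Usage note (illustrative, driver names are hypothetical): a typical KMS
 * driver calls this early in its modeset initialization, then fills in the
 * limits and vtable before creating planes, CRTCs, encoders and connectors:
 *
 *	static int foo_modeset_init(struct drm_device *dev)
 *	{
 *		int ret;
 *
 *		ret = drmm_mode_config_init(dev);
 *		if (ret)
 *			return ret;
 *
 *		dev->mode_config.min_width = 0;
 *		dev->mode_config.min_height = 0;
 *		dev->mode_config.max_width = 4096;
 *		dev->mode_config.max_height = 4096;
 *		dev->mode_config.funcs = &foo_mode_config_funcs;
 *
 *		// ... create planes, CRTCs, encoders, connectors ...
 *		return 0;
 *	}
 *
 * Because the cleanup action is registered via drmm_add_action_or_reset(),
 * such a driver does not need an explicit drm_mode_config_cleanup() call on
 * its error or teardown paths.
 */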

/**
 * drm_mode_config_cleanup - free up DRM mode_config info
 * @dev: DRM device
 *
 * Free up all the connectors and CRTCs associated with this DRM device, then
 * free up the framebuffers and associated buffer objects.
 *
 * Note that since this /should/ happen single-threaded at driver/device
 * teardown time, no locking is required. It's the driver's job to ensure that
 * this guarantee actually holds true.
 *
 * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for
 * drivers to explicitly call this function.
 */
void drm_mode_config_cleanup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc *crtc, *ct;
	struct drm_encoder *encoder, *enct;
	struct drm_framebuffer *fb, *fbt;
	struct drm_property *property, *pt;
	struct drm_property_blob *blob, *bt;
	struct drm_plane *plane, *plt;

	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
				 head) {
		encoder->funcs->destroy(encoder);
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* drm_connector_list_iter holds a full reference to the
		 * current connector itself, which means it is inherently safe
		 * against unreferencing the current connector - but not against
		 * deleting it right away. */
		drm_connector_put(connector);
	}
	drm_connector_list_iter_end(&conn_iter);
	/* connector_iter drops references in a work item. */
	flush_work(&dev->mode_config.connector_free_work);
	if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter)
			DRM_ERROR("connector %s leaked!\n", connector->name);
		drm_connector_list_iter_end(&conn_iter);
	}

	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
				 head) {
		drm_property_destroy(dev, property);
	}

	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
				 head) {
		plane->funcs->destroy(plane);
	}

	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
		crtc->funcs->destroy(crtc);
	}

	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
				 head_global) {
		drm_property_blob_put(blob);
	}

	/*
	 * Single-threaded teardown context, so it's not required to grab the
	 * fb_lock to protect against concurrent fb_list access. On the
	 * contrary, it would actually deadlock with the drm_framebuffer_cleanup
	 * function.
	 *
	 * Also, if there are any framebuffers left, that's a driver leak now,
	 * so politely WARN about this.
	 */
	WARN_ON(!list_empty(&dev->mode_config.fb_list));
	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
		struct drm_printer p = drm_debug_printer("[leaked fb]");

		drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
		drm_framebuffer_print_info(&p, 1, fb);
		drm_framebuffer_free(&fb->base.refcount);
	}

	ida_destroy(&dev->mode_config.connector_ida);
	idr_destroy(&dev->mode_config.tile_idr);
	idr_destroy(&dev->mode_config.object_idr);
	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);

static u32 full_encoder_mask(struct drm_device *dev)
{
	struct drm_encoder *encoder;
	u32 encoder_mask = 0;

	drm_for_each_encoder(encoder, dev)
		encoder_mask |= drm_encoder_mask(encoder);

	return encoder_mask;
}

/*
 * For some reason we want the encoder itself included in
 * possible_clones. Make life easy for drivers by allowing them
 * to leave possible_clones unset if no cloning is possible.
 */
static void fixup_encoder_possible_clones(struct drm_encoder *encoder)
{
	if (encoder->possible_clones == 0)
		encoder->possible_clones = drm_encoder_mask(encoder);
}
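
/*
 * Illustrative sketch (hypothetical encoders enc_a/enc_b): a driver whose two
 * encoders can scan out the same CRTC content simultaneously would declare
 * that relationship symmetrically, including each encoder in its own mask:
 *
 *	enc_a->possible_clones = drm_encoder_mask(enc_a) | drm_encoder_mask(enc_b);
 *	enc_b->possible_clones = drm_encoder_mask(enc_a) | drm_encoder_mask(enc_b);
 *
 * A driver with no cloning support can simply leave possible_clones at 0 and
 * rely on the fixup above; validate_encoder_possible_clones() below then only
 * checks that whatever was set is symmetric and stays within the full encoder
 * mask.
 */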

static void validate_encoder_possible_clones(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	u32 encoder_mask = full_encoder_mask(dev);
	struct drm_encoder *other;

	drm_for_each_encoder(other, dev) {
		WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) !=
		     !!(other->possible_clones & drm_encoder_mask(encoder)),
		     "possible_clones mismatch: "
		     "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. "
		     "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n",
		     encoder->base.id, encoder->name,
		     drm_encoder_mask(encoder), encoder->possible_clones,
		     other->base.id, other->name,
		     drm_encoder_mask(other), other->possible_clones);
	}

	WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 ||
	     (encoder->possible_clones & ~encoder_mask) != 0,
	     "Bogus possible_clones: "
	     "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n",
	     encoder->base.id, encoder->name,
	     encoder->possible_clones, encoder_mask);
}

static u32 full_crtc_mask(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	u32 crtc_mask = 0;

	drm_for_each_crtc(crtc, dev)
		crtc_mask |= drm_crtc_mask(crtc);

	return crtc_mask;
}

static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
{
	u32 crtc_mask = full_crtc_mask(encoder->dev);

	WARN((encoder->possible_crtcs & crtc_mask) == 0 ||
	     (encoder->possible_crtcs & ~crtc_mask) != 0,
	     "Bogus possible_crtcs: "
	     "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n",
	     encoder->base.id, encoder->name,
	     encoder->possible_crtcs, crtc_mask);
}

void drm_mode_config_validate(struct drm_device *dev)
{
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	u32 primary_with_crtc = 0, cursor_with_crtc = 0;
	unsigned int num_primary = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	drm_for_each_encoder(encoder, dev)
		fixup_encoder_possible_clones(encoder);

	drm_for_each_encoder(encoder, dev) {
		validate_encoder_possible_clones(encoder);
		validate_encoder_possible_crtcs(encoder);
	}

	drm_for_each_crtc(crtc, dev) {
		WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n",
		     crtc->base.id, crtc->name);

		WARN(crtc->cursor && crtc->funcs->cursor_set,
		     "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func",
		     crtc->base.id, crtc->name);
		WARN(crtc->cursor && crtc->funcs->cursor_set2,
		     "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func",
		     crtc->base.id, crtc->name);
		WARN(crtc->cursor && crtc->funcs->cursor_move,
		     "[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func",
		     crtc->base.id, crtc->name);

		if (crtc->primary) {
			WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)),
			     "Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
			     crtc->primary->base.id, crtc->primary->name,
			     crtc->base.id, crtc->name);
			WARN(primary_with_crtc & drm_plane_mask(crtc->primary),
			     "Primary plane [PLANE:%d:%s] used for multiple CRTCs",
			     crtc->primary->base.id, crtc->primary->name);
			primary_with_crtc |= drm_plane_mask(crtc->primary);
		}
		if (crtc->cursor) {
			WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)),
			     "Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
			     crtc->cursor->base.id, crtc->cursor->name,
			     crtc->base.id, crtc->name);
			WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor),
			     "Cursor plane [PLANE:%d:%s] used for multiple CRTCs",
			     crtc->cursor->base.id, crtc->cursor->name);
			cursor_with_crtc |= drm_plane_mask(crtc->cursor);
		}
	}

	drm_for_each_plane(plane, dev) {
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			num_primary++;
	}

	WARN(num_primary != dev->mode_config.num_crtc,
	     "Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs",
	     num_primary, dev->mode_config.num_crtc);
}
|