/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_RUNTIME_PM_H__
#define __INTEL_RUNTIME_PM_H__

#include <linux/types.h>

#include "intel_display.h"
#include "intel_wakeref.h"

#include "i915_utils.h"

struct device;
struct drm_i915_private;
struct drm_printer;

enum i915_drm_suspend_mode {
	I915_DRM_SUSPEND_IDLE,
	I915_DRM_SUSPEND_MEM,
	I915_DRM_SUSPEND_HIBERNATE,
};

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable), but
 * it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs and
 * goes back to true exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, see Documentation/power/runtime_pm.txt.
 */
struct intel_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.pdev->dev */
	bool available;
	bool suspended;
	bool irqs_enabled;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct intel_runtime_pm_debug {
		spinlock_t lock;

		depot_stack_handle_t last_acquire;
		depot_stack_handle_t last_release;

		depot_stack_handle_t *owners;
		unsigned long count;
	} debug;
#endif
};
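
/*
 * Minimal usage sketch (illustrative only, not lifted from driver code;
 * "i915" stands for a hypothetical caller-owned struct drm_i915_private):
 * grab a wakeref around any HW access and release it with the returned
 * cookie.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *	... access the hardware ...
 *	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 */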

#define BITS_PER_WAKEREF	\
	BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)
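
/*
 * How the packed wakeref_count decodes (an explanatory sketch inferred from
 * the helpers below): the low INTEL_RPM_WAKELOCK_SHIFT bits count raw
 * wakerefs and the high bits count wakelocks. For example, a wakeref_count
 * of 2 * INTEL_RPM_WAKELOCK_BIAS + 3 yields:
 *
 *	intel_rpm_raw_wakeref_count()	-> 3	(value & INTEL_RPM_RAW_WAKEREF_MASK)
 *	intel_rpm_wakelock_count()	-> 2	(value >> INTEL_RPM_WAKELOCK_SHIFT)
 */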

static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}

static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
	WARN_ONCE(rpm->suspended,
		  "Device suspended during HW access\n");
}

static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}

/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables asserts that check if we hold an RPM wakelock
 * reference, while keeping the device-not-suspended checks enabled.
 * It's meant to be used only in special circumstances where our rule about
 * the wakelock refcount wrt. the device power state doesn't hold. According
 * to this rule at any point where we access the HW or want to keep the HW in
 * an active state we must hold an RPM wakelock reference acquired via one of
 * the intel_runtime_pm_get() helpers. Currently there are a few special spots
 * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
 * forcewake release timer, and the GPU RPS and hangcheck works. All other
 * users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances, otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}
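
/*
 * Pairing sketch for the two helpers above (illustrative only; "rpm" stands
 * for a pointer to the relevant intel_runtime_pm, e.g. &i915->runtime_pm):
 * the disable/enable calls must bracket exactly the special-case code that
 * legitimately runs without holding a wakelock reference.
 *
 *	disable_rpm_wakeref_asserts(rpm);
 *	... HW access with no wakelock ref, e.g. in a suspend/resume handler ...
 *	enable_rpm_wakeref_asserts(rpm);
 */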

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);

intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);

#define with_intel_runtime_pm(i915, wf) \
	for ((wf) = intel_runtime_pm_get(&(i915)->runtime_pm); (wf); \
	     intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_in_use(i915, wf) \
	for ((wf) = intel_runtime_pm_get_if_in_use(&(i915)->runtime_pm); (wf); \
	     intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)
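
/*
 * Usage sketch for the two macros above (illustrative only; "i915" is the
 * caller's struct drm_i915_private): the macro acquires a wakeref, runs the
 * body if one was obtained (the _if_in_use variant skips the body when no
 * wakeref could be taken) and releases the wakeref when the body is left.
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_runtime_pm(i915, wakeref) {
 *		... HW access, device guaranteed awake here ...
 *	}
 */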

void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p);
#else
static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
						   struct drm_printer *p)
{
}
#endif

#endif /* __INTEL_RUNTIME_PM_H__ */