drm/i915: Fix the GT fence revocation runtime PM logic
To optimize some task deferring it until runtime resume unless someone
holds a runtime PM reference (because in this case the task can be done
w/o the overhead of runtime resume), we have to use the runtime PM
get-if-active logic: If the runtime PM usage count is 0 (and so
get-if-in-use would return false) the runtime suspend handler is not
necessarily called yet (it could be just pending), so the device is not
necessarily powered down, and so the runtime resume handler is not
guaranteed to be called.
The fence revocation depends on the above deferral, so add a
get-if-active helper and use it during fence revocation.
v2:
- Add code comment explaining the fence reg programming deferral logic
to i915_vma_revoke_fence(). (Chris)
- Add Cc: stable and Fixes: tags. (Chris)
- Fix the function docbook comment.
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: <stable@vger.kernel.org> # v4.12+
Fixes: 181df2d458 ("drm/i915: Take rpm wakelock for releasing the fence on unbind")
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210322204223.919936-1-imre.deak@intel.com
This commit: 9d58aa4629 (parent: 2b5a4562ed)
@@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
 	WRITE_ONCE(fence->vma, NULL);
 	vma->fence = NULL;
 
-	with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+	/*
+	 * Skip the write to HW if and only if the device is currently
+	 * suspended.
+	 *
+	 * If the driver does not currently hold a wakeref (if_in_use == 0),
+	 * the device may currently be runtime suspended, or it may be woken
+	 * up before the suspend takes place. If the device is not suspended
+	 * (powered down) and we skip clearing the fence register, the HW is
+	 * left in an undefined state where we may end up with multiple
+	 * registers overlapping.
+	 */
+	with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
 		fence_write(fence);
 }
 
@@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
 }
 
 /**
- * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
  * @rpm: the intel_runtime_pm structure
+ * @ignore_usecount: get a ref even if dev->power.usage_count is 0
  *
  * This function grabs a device-level runtime pm reference if the device is
- * already in use and ensures that it is powered up. It is illegal to try
- * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
+ * already active and ensures that it is powered up. It is illegal to try
+ * and access the HW should intel_runtime_pm_get_if_active() report failure.
+ *
+ * If @ignore_usecount=true, a reference will be acquired even if there is no
+ * user requiring the device to be powered up (dev->power.usage_count == 0).
+ * If the function returns false in this case then it's guaranteed that the
+ * device's runtime suspend hook has been called already or that it will be
+ * called (and hence it's also guaranteed that the device's runtime resume
+ * hook will be called eventually).
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
@@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
  * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
  * as True if the wakeref was acquired, or False otherwise.
  */
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
+							bool ignore_usecount)
 {
 	if (IS_ENABLED(CONFIG_PM)) {
 		/*
@@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
 		 * function, since the power state is undefined. This applies
 		 * atm to the late/early system suspend/resume handlers.
 		 */
-		if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
+		if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
 			return 0;
 	}
 
@@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
 	return track_intel_runtime_pm_wakeref(rpm);
 }
 
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+{
+	return __intel_runtime_pm_get_if_active(rpm, false);
+}
+
+intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
+{
+	return __intel_runtime_pm_get_if_active(rpm, true);
+}
+
 /**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
  * @rpm: the intel_runtime_pm structure
@@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
 
 intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
 
@@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
 	for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
 	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
 
+#define with_intel_runtime_pm_if_active(rpm, wf) \
+	for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
+	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+
 void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
Loading…
Reference in New Issue