/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

/* Logical Rings */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);

/*
 * Wrap the software tail offset at the end of the ringbuffer; the mask only
 * works because the ringbuffer size is a power of two.
 */
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
	ringbuf->tail &= ringbuf->size - 1;
}

/* Copy one DWORD into the ringbuffer mapping and advance the software tail. */
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
					   u32 data)
{
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}

int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
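
/*
 * Illustrative usage sketch (an assumption based on the declarations above,
 * not a helper exported by this header): space is reserved with
 * intel_logical_ring_begin(), filled one DWORD at a time with
 * intel_logical_ring_emit(), then handed to the hardware with
 * intel_logical_ring_advance_and_submit(). MI_NOOP stands in for any command
 * DWORD; the real opcodes are defined elsewhere in the driver.
 *
 *	static int emit_two_noops(struct intel_ringbuffer *ringbuf)
 *	{
 *		int ret;
 *
 *		ret = intel_logical_ring_begin(ringbuf, 2);
 *		if (ret)
 *			return ret;
 *
 *		intel_logical_ring_emit(ringbuf, MI_NOOP);
 *		intel_logical_ring_emit(ringbuf, MI_NOOP);
 *		intel_logical_ring_advance_and_submit(ringbuf);
 *
 *		return 0;
 *	}
 */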

/* Logical Ring Contexts */
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);

/*
 * A context/tail pair queued for submission to the ELSP. Requests stay on
 * the engine's execlist queue until the context status buffer reports that
 * the hardware has completed them.
 */
struct intel_ctx_submit_request {
	struct intel_context *ctx;
	struct intel_engine_cs *ring;
	u32 tail;

	struct list_head execlist_link;

	/*
	 * Unreferencing the context when freeing the request can drop the
	 * last reference to the backing bo, which requires struct_mutex and
	 * therefore cannot happen in interrupt context; unreferencing and
	 * freeing are deferred to a bottom half through this work item.
	 */
	struct work_struct work;

	/*
	 * Number of times this request has been sent to the ELSP. Submitting
	 * a context that is already in execution only causes a lite restore
	 * (the GPU simply samples the new tail pointer), so a context
	 * submitted twice reports two completions. When that is the case, the
	 * first completion event must not trigger new submissions: a second
	 * completion is known to follow shortly, and acting early would cause
	 * a full preemption, which is not supported yet (anything other than
	 * a lite restore WARNs).
	 */
	int elsp_submitted;
};
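
/*
 * Illustrative sketch of the bottom half hinted at by the work item above
 * (an assumption distilled from the commit history, not code exported by
 * this header; the function name is hypothetical). The deferred free runs
 * in process context, where taking struct_mutex to drop the context
 * reference is safe:
 *
 *	static void example_free_request(struct work_struct *work)
 *	{
 *		struct intel_ctx_submit_request *req =
 *			container_of(work, struct intel_ctx_submit_request,
 *				     work);
 *		struct drm_device *dev = req->ring->dev;
 *
 *		mutex_lock(&dev->struct_mutex);
 *		i915_gem_context_unreference(req->ctx);
 *		mutex_unlock(&dev->struct_mutex);
 *		kfree(req);
 *	}
 */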

void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
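
/*
 * Illustrative sketch of how elsp_submitted lets the context switch handler
 * tell a lite-restore completion from a real one (an assumption distilled
 * from the commit history, not the driver's actual code). A context
 * submitted twice reports two completions; only the last one may retire the
 * request at the head of the queue and free up an ELSP slot:
 *
 *	head_req = list_first_entry_or_null(&ring->execlist_queue,
 *					    struct intel_ctx_submit_request,
 *					    execlist_link);
 *	if (head_req && --head_req->elsp_submitted == 0) {
 *		list_del(&head_req->execlist_link);
 *		queue_work(wq, &head_req->work);	(wq is hypothetical)
 *	}
 */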

#endif /* _INTEL_LRC_H_ */