drm/i915/guc: Add multi-lrc context registration
Add multi-lrc context registration H2G. In addition, a workqueue and
process descriptor are set up during multi-lrc context registration, as
these data structures are needed for multi-lrc submission.

v2:
 (John Harrison)
  - Move GuC specific fields into sub-struct
  - Clean up WQ defines
  - Add comment explaining math to derive WQ / PD address
v3:
 (John Harrison)
  - Add PARENT_SCRATCH_SIZE define
  - Update comment explaining multi-lrc register
v4:
 (John Harrison)
  - Move PARENT_SCRATCH_SIZE to common file

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-9-matthew.brost@intel.com
commit c2aa552ff0 (parent 3897df4c01)
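For illustration only (not part of the patch): a minimal standalone sketch of the work queue / process descriptor address math the changes below introduce. The 4 KiB PAGE_SIZE and the sample parent_page index are assumptions chosen for the example.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the defines added by the patch; a 4 KiB page is assumed here. */
#define PAGE_SIZE               4096u
#define PARENT_SCRATCH_SIZE     PAGE_SIZE
#define WQ_SIZE                 (PARENT_SCRATCH_SIZE / 2)
#define WQ_OFFSET               (PARENT_SCRATCH_SIZE - WQ_SIZE)

int main(void)
{
        /* Hypothetical scratch page index within ce->state, for illustration. */
        uint32_t parent_page = 22;

        /* The process descriptor starts at the beginning of the scratch page... */
        uint32_t pdesc_offset = parent_page * PAGE_SIZE;
        /* ...and the work queue occupies the upper half of the same page. */
        uint32_t wq_offset = pdesc_offset + WQ_OFFSET;

        printf("process desc @ 0x%x, work queue @ 0x%x, wq size 0x%x\n",
               pdesc_offset, wq_offset, WQ_SIZE);
        return 0;
}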
@@ -44,6 +44,8 @@ void intel_context_free(struct intel_context *ce);
 int intel_context_reconfigure_sseu(struct intel_context *ce,
                                    const struct intel_sseu sseu);
 
+#define PARENT_SCRATCH_SIZE PAGE_SIZE
+
 static inline bool intel_context_is_child(struct intel_context *ce)
 {
         return !!ce->parallel.parent;
@@ -239,6 +239,18 @@ struct intel_context {
                 struct intel_context *parent;
                 /** @number_children: number of children if parent */
                 u8 number_children;
+                /** @guc: GuC specific members for parallel submission */
+                struct {
+                        /** @wqi_head: head pointer in work queue */
+                        u16 wqi_head;
+                        /** @wqi_tail: tail pointer in work queue */
+                        u16 wqi_tail;
+                        /**
+                         * @parent_page: page in context state (ce->state) used
+                         * by parent for work queue, process descriptor
+                         */
+                        u8 parent_page;
+                } guc;
         } parallel;
 
 #ifdef CONFIG_DRM_I915_SELFTEST
@@ -942,6 +942,11 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
                 context_size += PAGE_SIZE;
         }
 
+        if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
+                ce->parallel.guc.parent_page = context_size / PAGE_SIZE;
+                context_size += PARENT_SCRATCH_SIZE;
+        }
+
         obj = i915_gem_object_create_lmem(engine->i915, context_size,
                                           I915_BO_ALLOC_PM_VOLATILE);
         if (IS_ERR(obj))
@@ -142,6 +142,7 @@ enum intel_guc_action {
         INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
         INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
         INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
+        INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
         INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
         INTEL_GUC_ACTION_LIMIT
 };
@@ -52,8 +52,6 @@
 
 #define GUC_DOORBELL_INVALID            256
 
-#define GUC_WQ_SIZE                     (PAGE_SIZE * 2)
-
 /* Work queue item header definitions */
 #define WQ_STATUS_ACTIVE                1
 #define WQ_STATUS_SUSPENDED             2
@@ -344,6 +344,46 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
         return rb_entry(rb, struct i915_priolist, node);
 }
 
+/*
+ * When using multi-lrc submission a scratch memory area is reserved in the
+ * parent's context state for the process descriptor and work queue. Currently
+ * the scratch area is sized to a page.
+ *
+ * The layout of this scratch area is below:
+ * 0                                    guc_process_desc
+ * ...                                  unused
+ * PARENT_SCRATCH_SIZE / 2              work queue start
+ * ...                                  work queue
+ * PARENT_SCRATCH_SIZE - 1              work queue end
+ */
+#define WQ_SIZE                 (PARENT_SCRATCH_SIZE / 2)
+#define WQ_OFFSET               (PARENT_SCRATCH_SIZE - WQ_SIZE)
+static u32 __get_process_desc_offset(struct intel_context *ce)
+{
+        GEM_BUG_ON(!ce->parallel.guc.parent_page);
+
+        return ce->parallel.guc.parent_page * PAGE_SIZE;
+}
+
+static u32 __get_wq_offset(struct intel_context *ce)
+{
+        return __get_process_desc_offset(ce) + WQ_OFFSET;
+}
+
+static struct guc_process_desc *
+__get_process_desc(struct intel_context *ce)
+{
+        /*
+         * Need to subtract LRC_STATE_OFFSET here as the
+         * parallel.guc.parent_page is the offset into ce->state while
+         * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
+         */
+        return (struct guc_process_desc *)
+                (ce->lrc_reg_state +
+                 ((__get_process_desc_offset(ce) -
+                   LRC_STATE_OFFSET) / sizeof(u32)));
+}
+
 static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
 {
         struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
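For illustration only (not part of the patch): a minimal standalone sketch of the pointer math in __get_process_desc() above, showing why LRC_STATE_OFFSET is subtracted before indexing a u32-sized register-state array. The LRC_STATE_OFFSET value of one page and the parent_page index are assumptions for the example; in the driver the offset is platform-derived.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE               4096u
#define LRC_STATE_OFFSET        PAGE_SIZE       /* assumption for illustration */

int main(void)
{
        /* Pretend ce->state is mapped here and lrc_reg_state points past the
         * first LRC_STATE_OFFSET bytes, as the patch comment describes. */
        static uint32_t state[24 * PAGE_SIZE / sizeof(uint32_t)];
        uint32_t *lrc_reg_state = state + LRC_STATE_OFFSET / sizeof(uint32_t);
        uint32_t parent_page = 22;      /* hypothetical scratch page index */

        uint32_t pdesc_offset = parent_page * PAGE_SIZE;        /* offset into ce->state */
        void *pdesc = lrc_reg_state +
                      (pdesc_offset - LRC_STATE_OFFSET) / sizeof(uint32_t);

        /* Both expressions land on the same byte of the backing store. */
        printf("match: %d\n", pdesc == (void *)((char *)state + pdesc_offset));
        return 0;
}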
@@ -1365,6 +1405,30 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
         spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
+static int __guc_action_register_multi_lrc(struct intel_guc *guc,
+                                           struct intel_context *ce,
+                                           u32 guc_id,
+                                           u32 offset,
+                                           bool loop)
+{
+        struct intel_context *child;
+        u32 action[4 + MAX_ENGINE_INSTANCE];
+        int len = 0;
+
+        GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+        action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+        action[len++] = guc_id;
+        action[len++] = ce->parallel.number_children + 1;
+        action[len++] = offset;
+        for_each_child(ce, child) {
+                offset += sizeof(struct guc_lrc_desc);
+                action[len++] = offset;
+        }
+
+        return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
 static int __guc_action_register_context(struct intel_guc *guc,
                                          u32 guc_id,
                                          u32 offset,
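For illustration only (not part of the patch): a standalone sketch of what the action array built by the new helper above would contain for a hypothetical parent with two children. The guc_id, descriptor offset, descriptor size, and MAX_ENGINE_INSTANCE values here are assumptions picked for the example.

#include <stdint.h>
#include <stdio.h>

#define INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC     0x4601
#define MAX_ENGINE_INSTANCE                             8       /* assumption */
#define LRC_DESC_SIZE                                   0x80    /* hypothetical sizeof(struct guc_lrc_desc) */

int main(void)
{
        uint32_t action[4 + MAX_ENGINE_INSTANCE];
        uint32_t guc_id = 17;           /* hypothetical parent guc_id */
        uint32_t offset = 0x1000;       /* hypothetical GGTT offset of the parent's descriptor */
        uint32_t number_children = 2;
        uint32_t i, len = 0;

        /* Same payload order as __guc_action_register_multi_lrc() above:
         * action id, parent guc_id, total context count, then one descriptor
         * offset per context (parent first, children contiguous after it). */
        action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
        action[len++] = guc_id;
        action[len++] = number_children + 1;
        action[len++] = offset;
        for (i = 0; i < number_children; i++) {
                offset += LRC_DESC_SIZE;
                action[len++] = offset;
        }

        for (i = 0; i < len; i++)
                printf("action[%u] = 0x%x\n", i, action[i]);
        return 0;
}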
@@ -1387,9 +1451,15 @@ static int register_context(struct intel_context *ce, bool loop)
                 ce->guc_id.id * sizeof(struct guc_lrc_desc);
         int ret;
 
+        GEM_BUG_ON(intel_context_is_child(ce));
         trace_intel_context_register(ce);
 
-        ret = __guc_action_register_context(guc, ce->guc_id.id, offset, loop);
+        if (intel_context_is_parent(ce))
+                ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id,
+                                                      offset, loop);
+        else
+                ret = __guc_action_register_context(guc, ce->guc_id.id, offset,
+                                                    loop);
         if (likely(!ret)) {
                 unsigned long flags;
 
@@ -1418,6 +1488,7 @@ static int deregister_context(struct intel_context *ce, u32 guc_id)
 {
         struct intel_guc *guc = ce_to_guc(ce);
 
+        GEM_BUG_ON(intel_context_is_child(ce));
         trace_intel_context_deregister(ce);
 
         return __guc_action_deregister_context(guc, guc_id);
@@ -1445,6 +1516,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
         struct guc_lrc_desc *desc;
         bool context_registered;
         intel_wakeref_t wakeref;
+        struct intel_context *child;
         int ret = 0;
 
         GEM_BUG_ON(!engine->mask);
@@ -1470,6 +1542,41 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
         desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
         guc_context_policy_init(engine, desc);
 
+        /*
+         * If context is a parent, we need to register a process descriptor
+         * describing a work queue and register all child contexts.
+         */
+        if (intel_context_is_parent(ce)) {
+                struct guc_process_desc *pdesc;
+
+                ce->parallel.guc.wqi_tail = 0;
+                ce->parallel.guc.wqi_head = 0;
+
+                desc->process_desc = i915_ggtt_offset(ce->state) +
+                        __get_process_desc_offset(ce);
+                desc->wq_addr = i915_ggtt_offset(ce->state) +
+                        __get_wq_offset(ce);
+                desc->wq_size = WQ_SIZE;
+
+                pdesc = __get_process_desc(ce);
+                memset(pdesc, 0, sizeof(*(pdesc)));
+                pdesc->stage_id = ce->guc_id.id;
+                pdesc->wq_base_addr = desc->wq_addr;
+                pdesc->wq_size_bytes = desc->wq_size;
+                pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+                for_each_child(ce, child) {
+                        desc = __get_lrc_desc(guc, child->guc_id.id);
+
+                        desc->engine_class =
+                                engine_class_to_guc_class(engine->class);
+                        desc->hw_context_desc = child->lrc.lrca;
+                        desc->priority = ce->guc_state.prio;
+                        desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+                        guc_context_policy_init(engine, desc);
+                }
+        }
+
         /*
          * The context_lookup xarray is used to determine if the hardware
          * context is currently registered. There are two cases in which it
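For illustration only (not part of the patch): the parent-only branch above boils down to seeding a process descriptor whose work queue lives in the upper half of the parent scratch page. A simplified standalone sketch follows; the struct below is a hypothetical stand-in for the fields the patch touches in guc_process_desc, not the real ABI layout, and the GGTT address and guc_id are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQ_STATUS_ACTIVE        1
#define PARENT_SCRATCH_SIZE     4096u           /* 4 KiB page assumed */
#define WQ_SIZE                 (PARENT_SCRATCH_SIZE / 2)
#define WQ_OFFSET               (PARENT_SCRATCH_SIZE - WQ_SIZE)

/* Hypothetical stand-in for the process descriptor fields seeded above. */
struct fake_process_desc {
        uint32_t stage_id;
        uint64_t wq_base_addr;
        uint32_t wq_size_bytes;
        uint32_t wq_status;
};

int main(void)
{
        /* Stands in for the parent scratch page; the process descriptor sits
         * at its start and the work queue in its upper half. */
        static uint64_t scratch[PARENT_SCRATCH_SIZE / sizeof(uint64_t)];
        struct fake_process_desc *pdesc = (struct fake_process_desc *)scratch;
        uint64_t scratch_ggtt = 0x100000;       /* hypothetical GGTT address of the page */

        memset(pdesc, 0, sizeof(*pdesc));
        pdesc->stage_id = 17;                           /* hypothetical parent guc_id */
        pdesc->wq_base_addr = scratch_ggtt + WQ_OFFSET; /* work queue start */
        pdesc->wq_size_bytes = WQ_SIZE;
        pdesc->wq_status = WQ_STATUS_ACTIVE;

        printf("wq @ 0x%llx, size %u, status %u\n",
               (unsigned long long)pdesc->wq_base_addr,
               pdesc->wq_size_bytes, pdesc->wq_status);
        return 0;
}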
@@ -2804,6 +2911,12 @@ g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
                 return NULL;
         }
 
+        if (unlikely(intel_context_is_child(ce))) {
+                drm_err(&guc_to_gt(guc)->i915->drm,
+                        "Context is child, desc_idx %u", desc_idx);
+                return NULL;
+        }
+
         return ce;
 }
 