OpenCloudOS-Kernel/drivers/gpu/drm/i915/gt/intel_gt.h


/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_GT__
#define __INTEL_GT__

#include "intel_engine_types.h"
#include "intel_gt_types.h"
#include "intel_reset.h"

struct drm_i915_private;
struct drm_printer;
#define GT_TRACE(gt, fmt, ...) do {					\
	const struct intel_gt *gt__ __maybe_unused = (gt);		\
	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev),		\
		  ##__VA_ARGS__);					\
} while (0)
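/*
 * Illustrative use of GT_TRACE (an example added for exposition, not
 * part of the original header): it behaves like GEM_TRACE() but
 * prefixes the message with the device name of the GT's owning i915
 * instance, e.g.:
 *
 *	GT_TRACE(gt, "resetting engines mask=%x\n", engine_mask);
 */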
static inline bool gt_is_root(struct intel_gt *gt)
{
	return !gt->info.id;
}

static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
	return container_of(uc, struct intel_gt, uc);
}

static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}

static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
{
	return container_of(huc, struct intel_gt, uc.huc);
}

static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
	return container_of(gsc, struct intel_gt, gsc);
}
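/*
 * The *_to_gt() helpers above rely on uc, uc.guc, uc.huc and gsc being
 * embedded directly in struct intel_gt, so container_of() can recover
 * the enclosing GT from a pointer to the member. A hypothetical round
 * trip, for exposition only:
 *
 *	struct intel_guc *guc = &gt->uc.guc;
 *	GEM_BUG_ON(guc_to_gt(guc) != gt);
 */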
void intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);
void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
void intel_gt_check_and_clear_faults(struct intel_gt *gt);
void intel_gt_clear_error_registers(struct intel_gt *gt,
				    intel_engine_mask_t engine_mask);
void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
void intel_gt_chipset_flush(struct intel_gt *gt);
static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
					  enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}
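/*
 * Illustrative use (an assumption for exposition: the field name below
 * is taken from the intel_gt_scratch_field enum in intel_gt_types.h):
 * a scratch field is addressed as the scratch buffer's GGTT offset
 * plus the field's byte offset, e.g.:
 *
 *	u32 addr = intel_gt_scratch_offset(gt, INTEL_GT_SCRATCH_FIELD_DEFAULT);
 */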
static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}

static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
		   !test_bit(I915_WEDGED, &gt->reset.flags));

	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}
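/*
 * Typical caller-side sketch (illustrative only): submission paths
 * generally refuse new work on a wedged GT, e.g.:
 *
 *	if (intel_gt_is_wedged(gt))
 *		return -EIO;
 *
 * Note the GEM_BUG_ON above asserts that an unrecoverable error always
 * implies the GT has also been marked wedged.
 */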
int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
void intel_gt_release_all(struct drm_i915_private *i915);
#define for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_MAX_GT; \
	     (id__)++) \
		for_each_if(((gt__) = (i915__)->gt[(id__)]))
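/*
 * Illustrative use of for_each_gt (an example added for exposition):
 * the outer loop walks every GT slot of the device and for_each_if()
 * skips empty slots, so the body only sees populated GTs:
 *
 *	struct intel_gt *gt;
 *	unsigned int i;
 *
 *	for_each_gt(gt, i915, i)
 *		intel_gt_check_and_clear_faults(gt);
 */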
void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p);
void intel_gt_watchdog_work(struct work_struct *work);
/*
 * Batched TLB invalidation (from commit 5d36acb7198b "drm/i915/gt:
 * Batch TLB invalidations", which fixes 7938d61591d3 "drm/i915: Flush
 * TLBs before releasing backing store"):
 *
 * TLBs are invalidated in batches in order to reduce performance
 * regressions. Previously, every caller performed a full barrier
 * around a TLB invalidation, ignoring all other invalidations that may
 * already have removed their PTEs from the cache. As this is a
 * synchronous operation and can be quite slow, multiple threads would
 * contend on the TLB invalidate mutex, blocking userspace. The TLB
 * only needs to be invalidated once after replacing a PTE, to ensure
 * there is no possible continued access to the physical address before
 * the pages are released. By tracking a seqno for each full TLB
 * invalidation, we can quickly determine whether one has been
 * performed since rewriting the PTE, and only trigger one ourselves if
 * necessary.
 */
static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
{
	return seqprop_sequence(&gt->tlb.seqno);
}

static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
{
	return intel_gt_tlb_seqno(gt) | 1;
}
void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
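/*
 * Hedged usage sketch of the seqno scheme described above ("obj_tlb"
 * is a hypothetical caller-side variable, not part of this API): when
 * PTEs are rewritten, the caller records the seqno of the next full
 * invalidation; when the backing store is later released, the flush is
 * skipped if a full invalidation has already happened in the meantime.
 *
 *	// PTEs rewritten: remember the invalidation we still need
 *	obj_tlb = intel_gt_next_invalidate_tlb_full(gt);
 *
 *	// backing store released: no-op if that seqno already passed
 *	intel_gt_invalidate_tlb(gt, obj_tlb);
 *
 * The "| 1" makes the stored value odd, so it orders after the current
 * stable (even) sequence reading and is only considered passed once a
 * subsequent full invalidation advances the seqcount.
 */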
/*
 * Background (from commit 7938d61591d3 "drm/i915: Flush TLBs before
 * releasing backing store"):
 *
 * We need to flush TLBs before releasing backing store, otherwise
 * userspace can encounter stale entries if (a) it does not declare
 * access to certain buffers and (b) it races with the backing-store
 * release from such an undeclared execution already running on the GPU
 * in parallel. The approach taken is to mark any buffer object that
 * was ever bound to the GPU and to trigger a serialized TLB flush when
 * its backing store is released.
 *
 * Alternatively, the flush could be done on VMA unbind, at which point
 * we could ascertain whether there is potentially a parallel GPU
 * execution (which could race). Essentially it boils down to paying
 * the cost of TLB flushes potentially needlessly at VMA unbind time
 * (when the backing store is not known to be going away, so flushing
 * is not needed for safety), versus potentially needlessly at
 * backing-store release time (when we cannot tell whether anything
 * executing on the GPU still uses that object). Therefore simplicity
 * of implementation has been chosen for now, with scope to benchmark
 * and refine later as required.
 */
#endif /* __INTEL_GT__ */