Merge tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next
- select igt testing depencies for CONFIG_DRM_I915_DEBUG (Chris) - track outputs in crtc state and clean up all our ad-hoc connector/encoder walking in modest code (Ville) - demidlayer drm_device/drm_i915_private (Chris Wilson) - thundering herd fix from Chris Wilson, with lots of help from Tvrtko Ursulin - piles of assorted clean and fallout from the thundering herd fix - documentation and more tuning for waitboosting (Chris) - pooled EU support on bxt (Arun Siluvery) - bxt support is no longer considered prelimary! - ring/engine vfunc cleanup from Tvrtko - introduce intel_wait_for_register helper (Chris) - opregion updates (Jani Nukla) - tuning and fixes for wait_for macros (Tvrkto&Imre) - more kabylake pci ids (Rodrigo) - pps cleanup and fixes for bxt (Imre) - move sink crc support over to atomic state (Maarten) - fix up async fbdev init ordering (Chris) - fbc fixes from Paulo and Chris * tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel: (223 commits) drm/i915: Update DRIVER_DATE to 20160711 drm/i915: Select DRM_VGEM for igt drm/i915: Select X86_MSR for igt drm/i915: Fill unused GGTT with scratch pages for VT-d drm/i915: Introduce Kabypoint PCH for Kabylake H/DT. drm/i915:gen9: implement WaMediaPoolStateCmdInWABB drm/i915: Check for invalid cloning earlier during modeset drm/i915: Simplify hdmi_12bpc_possible() drm/i915: Kill has_dsi_encoder drm/i915: s/INTEL_OUTPUT_DISPLAYPORT/INTEL_OUTPUT_DP/ drm/i915: Replace some open coded intel_crtc_has_dp_encoder()s drm/i915: Kill has_dp_encoder from pipe_config drm/i915: Replace manual lvds and sdvo/hdmi counting with intel_crtc_has_type() drm/i915: Unify intel_pipe_has_type() and intel_pipe_will_have_type() drm/i915: Add output_types bitmask into the crtc state drm/i915: Remove encoder type checks from MST suspend/resume drm/i915: Don't mark eDP encoders as MST capable drm/i915: avoid wait_for_atomic() in non-atomic host2guc_action() drm/i915: Group the irq breadcrumb variables into the same cacheline drm/i915: Wake up the bottom-half if we steal their interrupt ...
This commit is contained in:
commit
ff37c05a99
|
@ -18,6 +18,9 @@ config DRM_I915_WERROR
|
|||
config DRM_I915_DEBUG
|
||||
bool "Enable additional driver debugging"
|
||||
depends on DRM_I915
|
||||
select PREEMPT_COUNT
|
||||
select X86_MSR # used by igt/pm_rpm
|
||||
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
|
||||
default n
|
||||
help
|
||||
Choose this option to turn on extra driver debugging that may affect
|
||||
|
|
|
@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
|
|||
i915-y := i915_drv.o \
|
||||
i915_irq.o \
|
||||
i915_params.o \
|
||||
i915_pci.o \
|
||||
i915_suspend.o \
|
||||
i915_sysfs.o \
|
||||
intel_csr.o \
|
||||
intel_device_info.o \
|
||||
intel_pm.o \
|
||||
intel_runtime_pm.o
|
||||
|
||||
|
@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \
|
|||
i915_gem_userptr.o \
|
||||
i915_gpu_error.o \
|
||||
i915_trace_points.o \
|
||||
intel_breadcrumbs.o \
|
||||
intel_lrc.o \
|
||||
intel_mocs.o \
|
||||
intel_ringbuffer.o \
|
||||
|
@ -101,9 +104,6 @@ i915-y += dvo_ch7017.o \
|
|||
# virtual gpu code
|
||||
i915-y += i915_vgpu.o
|
||||
|
||||
# legacy horrors
|
||||
i915-y += i915_dma.o
|
||||
|
||||
ifeq ($(CONFIG_DRM_I915_GVT),y)
|
||||
i915-y += intel_gvt.o
|
||||
include $(src)/gvt/Makefile
|
||||
|
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -69,7 +69,7 @@
|
|||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20160620"
|
||||
#define DRIVER_DATE "20160711"
|
||||
|
||||
#undef WARN_ON
|
||||
/* Many gcc seem to no see through this and fall over :( */
|
||||
|
@ -320,15 +320,16 @@ struct i915_hotplug {
|
|||
for_each_if ((__ports_mask) & (1 << (__port)))
|
||||
|
||||
#define for_each_crtc(dev, crtc) \
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
|
||||
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
|
||||
|
||||
#define for_each_intel_plane(dev, intel_plane) \
|
||||
list_for_each_entry(intel_plane, \
|
||||
&dev->mode_config.plane_list, \
|
||||
&(dev)->mode_config.plane_list, \
|
||||
base.head)
|
||||
|
||||
#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
|
||||
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \
|
||||
list_for_each_entry(intel_plane, \
|
||||
&(dev)->mode_config.plane_list, \
|
||||
base.head) \
|
||||
for_each_if ((plane_mask) & \
|
||||
(1 << drm_plane_index(&intel_plane->base)))
|
||||
|
@ -339,11 +340,15 @@ struct i915_hotplug {
|
|||
base.head) \
|
||||
for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
|
||||
|
||||
#define for_each_intel_crtc(dev, intel_crtc) \
|
||||
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
|
||||
#define for_each_intel_crtc(dev, intel_crtc) \
|
||||
list_for_each_entry(intel_crtc, \
|
||||
&(dev)->mode_config.crtc_list, \
|
||||
base.head)
|
||||
|
||||
#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
|
||||
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
|
||||
#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
|
||||
list_for_each_entry(intel_crtc, \
|
||||
&(dev)->mode_config.crtc_list, \
|
||||
base.head) \
|
||||
for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
|
||||
|
||||
#define for_each_intel_encoder(dev, intel_encoder) \
|
||||
|
@ -353,7 +358,7 @@ struct i915_hotplug {
|
|||
|
||||
#define for_each_intel_connector(dev, intel_connector) \
|
||||
list_for_each_entry(intel_connector, \
|
||||
&dev->mode_config.connector_list, \
|
||||
&(dev)->mode_config.connector_list, \
|
||||
base.head)
|
||||
|
||||
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
|
||||
|
@ -475,6 +480,7 @@ struct drm_i915_error_state {
|
|||
struct timeval time;
|
||||
|
||||
char error_msg[128];
|
||||
bool simulated;
|
||||
int iommu;
|
||||
u32 reset_count;
|
||||
u32 suspend_count;
|
||||
|
@ -506,6 +512,7 @@ struct drm_i915_error_state {
|
|||
bool valid;
|
||||
/* Software tracked state */
|
||||
bool waiting;
|
||||
int num_waiters;
|
||||
int hangcheck_score;
|
||||
enum intel_ring_hangcheck_action hangcheck_action;
|
||||
int num_requests;
|
||||
|
@ -551,6 +558,12 @@ struct drm_i915_error_state {
|
|||
u32 tail;
|
||||
} *requests;
|
||||
|
||||
struct drm_i915_error_waiter {
|
||||
char comm[TASK_COMM_LEN];
|
||||
pid_t pid;
|
||||
u32 seqno;
|
||||
} *waiters;
|
||||
|
||||
struct {
|
||||
u32 gfx_mode;
|
||||
union {
|
||||
|
@ -868,9 +881,12 @@ struct i915_gem_context {
|
|||
|
||||
/* Unique identifier for this context, used by the hw for tracking */
|
||||
unsigned long flags;
|
||||
#define CONTEXT_NO_ZEROMAP BIT(0)
|
||||
#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
|
||||
unsigned hw_id;
|
||||
u32 user_handle;
|
||||
#define CONTEXT_NO_ZEROMAP (1<<0)
|
||||
|
||||
u32 ggtt_alignment;
|
||||
|
||||
struct intel_context {
|
||||
struct drm_i915_gem_object *state;
|
||||
|
@ -1011,6 +1027,7 @@ enum intel_pch {
|
|||
PCH_CPT, /* Cougarpoint PCH */
|
||||
PCH_LPT, /* Lynxpoint PCH */
|
||||
PCH_SPT, /* Sunrisepoint PCH */
|
||||
PCH_KBP, /* Kabypoint PCH */
|
||||
PCH_NOP,
|
||||
};
|
||||
|
||||
|
@ -1304,38 +1321,12 @@ struct i915_gem_mm {
|
|||
/** LRU list of objects with fence regs on them. */
|
||||
struct list_head fence_list;
|
||||
|
||||
/**
|
||||
* We leave the user IRQ off as much as possible,
|
||||
* but this means that requests will finish and never
|
||||
* be retired once the system goes idle. Set a timer to
|
||||
* fire periodically while the ring is running. When it
|
||||
* fires, go retire requests.
|
||||
*/
|
||||
struct delayed_work retire_work;
|
||||
|
||||
/**
|
||||
* When we detect an idle GPU, we want to turn on
|
||||
* powersaving features. So once we see that there
|
||||
* are no more requests outstanding and no more
|
||||
* arrive within a small period of time, we fire
|
||||
* off the idle_work.
|
||||
*/
|
||||
struct delayed_work idle_work;
|
||||
|
||||
/**
|
||||
* Are we in a non-interruptible section of code like
|
||||
* modesetting?
|
||||
*/
|
||||
bool interruptible;
|
||||
|
||||
/**
|
||||
* Is the GPU currently considered idle, or busy executing userspace
|
||||
* requests? Whilst idle, we attempt to power down the hardware and
|
||||
* display clocks. In order to reduce the effect on performance, there
|
||||
* is a slight delay before we do so.
|
||||
*/
|
||||
bool busy;
|
||||
|
||||
/* the indicator for dispatch video commands on two BSD rings */
|
||||
unsigned int bsd_ring_dispatch_index;
|
||||
|
||||
|
@ -1372,7 +1363,6 @@ struct i915_gpu_error {
|
|||
/* Hang gpu twice in this window and your context gets banned */
|
||||
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
|
||||
|
||||
struct workqueue_struct *hangcheck_wq;
|
||||
struct delayed_work hangcheck_work;
|
||||
|
||||
/* For reset and error_state handling. */
|
||||
|
@ -1408,21 +1398,20 @@ struct i915_gpu_error {
|
|||
#define I915_RESET_IN_PROGRESS_FLAG 1
|
||||
#define I915_WEDGED (1 << 31)
|
||||
|
||||
/**
|
||||
* Waitqueue to signal when a hang is detected. Used to for waiters
|
||||
* to release the struct_mutex for the reset to procede.
|
||||
*/
|
||||
wait_queue_head_t wait_queue;
|
||||
|
||||
/**
|
||||
* Waitqueue to signal when the reset has completed. Used by clients
|
||||
* that wait for dev_priv->mm.wedged to settle.
|
||||
*/
|
||||
wait_queue_head_t reset_queue;
|
||||
|
||||
/* Userspace knobs for gpu hang simulation;
|
||||
* combines both a ring mask, and extra flags
|
||||
*/
|
||||
u32 stop_rings;
|
||||
#define I915_STOP_RING_ALLOW_BAN (1 << 31)
|
||||
#define I915_STOP_RING_ALLOW_WARN (1 << 30)
|
||||
|
||||
/* For missed irq/seqno simulation. */
|
||||
unsigned int test_irq_rings;
|
||||
unsigned long test_irq_rings;
|
||||
};
|
||||
|
||||
enum modeset_restore {
|
||||
|
@ -1733,7 +1722,8 @@ struct intel_wm_config {
|
|||
};
|
||||
|
||||
struct drm_i915_private {
|
||||
struct drm_device *dev;
|
||||
struct drm_device drm;
|
||||
|
||||
struct kmem_cache *objects;
|
||||
struct kmem_cache *vmas;
|
||||
struct kmem_cache *requests;
|
||||
|
@ -2029,6 +2019,34 @@ struct drm_i915_private {
|
|||
int (*init_engines)(struct drm_device *dev);
|
||||
void (*cleanup_engine)(struct intel_engine_cs *engine);
|
||||
void (*stop_engine)(struct intel_engine_cs *engine);
|
||||
|
||||
/**
|
||||
* Is the GPU currently considered idle, or busy executing
|
||||
* userspace requests? Whilst idle, we allow runtime power
|
||||
* management to power down the hardware and display clocks.
|
||||
* In order to reduce the effect on performance, there
|
||||
* is a slight delay before we do so.
|
||||
*/
|
||||
unsigned int active_engines;
|
||||
bool awake;
|
||||
|
||||
/**
|
||||
* We leave the user IRQ off as much as possible,
|
||||
* but this means that requests will finish and never
|
||||
* be retired once the system goes idle. Set a timer to
|
||||
* fire periodically while the ring is running. When it
|
||||
* fires, go retire requests.
|
||||
*/
|
||||
struct delayed_work retire_work;
|
||||
|
||||
/**
|
||||
* When we detect an idle GPU, we want to turn on
|
||||
* powersaving features. So once we see that there
|
||||
* are no more requests outstanding and no more
|
||||
* arrive within a small period of time, we fire
|
||||
* off the idle_work.
|
||||
*/
|
||||
struct delayed_work idle_work;
|
||||
} gt;
|
||||
|
||||
/* perform PHY state sanity checks? */
|
||||
|
@ -2044,7 +2062,7 @@ struct drm_i915_private {
|
|||
|
||||
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
|
||||
{
|
||||
return dev->dev_private;
|
||||
return container_of(dev, struct drm_i915_private, drm);
|
||||
}
|
||||
|
||||
static inline struct drm_i915_private *dev_to_i915(struct device *dev)
|
||||
|
@ -2215,6 +2233,7 @@ struct drm_i915_gem_object {
|
|||
|
||||
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
|
||||
|
||||
unsigned int has_wc_mmap;
|
||||
unsigned int pin_display;
|
||||
|
||||
struct sg_table *pages;
|
||||
|
@ -2267,6 +2286,12 @@ struct drm_i915_gem_object {
|
|||
};
|
||||
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
|
||||
|
||||
static inline bool
|
||||
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
|
||||
{
|
||||
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Optimised SGL iterator for GEM objects
|
||||
*/
|
||||
|
@ -2357,7 +2382,7 @@ struct drm_i915_gem_request {
|
|||
/** On Which ring this request was generated */
|
||||
struct drm_i915_private *i915;
|
||||
struct intel_engine_cs *engine;
|
||||
unsigned reset_counter;
|
||||
struct intel_signal_node signaling;
|
||||
|
||||
/** GEM sequence number associated with the previous request,
|
||||
* when the HWS breadcrumb is equal to this the GPU is processing
|
||||
|
@ -2613,7 +2638,7 @@ struct drm_i915_cmd_table {
|
|||
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
|
||||
|
||||
#define REVID_FOREVER 0xff
|
||||
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
|
||||
#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
|
||||
|
||||
#define GEN_FOREVER (0)
|
||||
/*
|
||||
|
@ -2743,29 +2768,34 @@ struct drm_i915_cmd_table {
|
|||
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
|
||||
* chips, etc.).
|
||||
*/
|
||||
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1))
|
||||
#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2))
|
||||
#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3))
|
||||
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4))
|
||||
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5))
|
||||
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6))
|
||||
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7))
|
||||
#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8))
|
||||
#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
|
||||
#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
|
||||
#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
|
||||
#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
|
||||
#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
|
||||
#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
|
||||
#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
|
||||
#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
|
||||
|
||||
#define RENDER_RING (1<<RCS)
|
||||
#define BSD_RING (1<<VCS)
|
||||
#define BLT_RING (1<<BCS)
|
||||
#define VEBOX_RING (1<<VECS)
|
||||
#define BSD2_RING (1<<VCS2)
|
||||
#define ALL_ENGINES (~0)
|
||||
#define ENGINE_MASK(id) BIT(id)
|
||||
#define RENDER_RING ENGINE_MASK(RCS)
|
||||
#define BSD_RING ENGINE_MASK(VCS)
|
||||
#define BLT_RING ENGINE_MASK(BCS)
|
||||
#define VEBOX_RING ENGINE_MASK(VECS)
|
||||
#define BSD2_RING ENGINE_MASK(VCS2)
|
||||
#define ALL_ENGINES (~0)
|
||||
|
||||
#define HAS_ENGINE(dev_priv, id) \
|
||||
(!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
|
||||
|
||||
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
|
||||
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
|
||||
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
|
||||
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
|
||||
|
||||
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
|
||||
#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
|
||||
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
|
||||
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
|
||||
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
|
||||
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
|
||||
#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED)
|
||||
#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
|
||||
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
|
||||
HAS_EDRAM(dev))
|
||||
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
|
||||
|
@ -2783,9 +2813,10 @@ struct drm_i915_cmd_table {
|
|||
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
|
||||
|
||||
/* WaRsDisableCoarsePowerGating:skl,bxt */
|
||||
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
|
||||
IS_SKL_GT3(dev) || \
|
||||
IS_SKL_GT4(dev))
|
||||
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
|
||||
(IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
|
||||
IS_SKL_GT3(dev_priv) || \
|
||||
IS_SKL_GT4(dev_priv))
|
||||
|
||||
/*
|
||||
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
|
||||
|
@ -2832,7 +2863,7 @@ struct drm_i915_cmd_table {
|
|||
* command submission once loaded. But these are logically independent
|
||||
* properties, so we have separate macros to test them.
|
||||
*/
|
||||
#define HAS_GUC(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
|
||||
#define HAS_GUC(dev) (IS_GEN9(dev))
|
||||
#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
|
||||
#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
|
||||
|
||||
|
@ -2853,11 +2884,13 @@ struct drm_i915_cmd_table {
|
|||
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
|
||||
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
|
||||
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
|
||||
#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
|
||||
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
|
||||
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
|
||||
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
|
||||
|
||||
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
|
||||
#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
|
||||
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
|
||||
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
|
||||
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
|
||||
|
@ -2879,8 +2912,14 @@ struct drm_i915_cmd_table {
|
|||
|
||||
#include "i915_trace.h"
|
||||
|
||||
extern const struct drm_ioctl_desc i915_ioctls[];
|
||||
extern int i915_max_ioctl;
|
||||
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
|
||||
return true;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
|
||||
extern int i915_resume_switcheroo(struct drm_device *dev);
|
||||
|
@ -2888,7 +2927,7 @@ extern int i915_resume_switcheroo(struct drm_device *dev);
|
|||
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
|
||||
int enable_ppgtt);
|
||||
|
||||
/* i915_dma.c */
|
||||
/* i915_drv.c */
|
||||
void __printf(3, 4)
|
||||
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
|
||||
const char *fmt, ...);
|
||||
|
@ -2896,14 +2935,6 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
|
|||
#define i915_report_error(dev_priv, fmt, ...) \
|
||||
__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
|
||||
|
||||
extern int i915_driver_load(struct drm_device *, unsigned long flags);
|
||||
extern int i915_driver_unload(struct drm_device *);
|
||||
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
|
||||
extern void i915_driver_lastclose(struct drm_device * dev);
|
||||
extern void i915_driver_preclose(struct drm_device *dev,
|
||||
struct drm_file *file);
|
||||
extern void i915_driver_postclose(struct drm_device *dev,
|
||||
struct drm_file *file);
|
||||
#ifdef CONFIG_COMPAT
|
||||
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
|
@ -2928,7 +2959,23 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
|
|||
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
|
||||
|
||||
/* i915_irq.c */
|
||||
void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
|
||||
static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
unsigned long delay;
|
||||
|
||||
if (unlikely(!i915.enable_hangcheck))
|
||||
return;
|
||||
|
||||
/* Don't continually defer the hangcheck so that it is always run at
|
||||
* least once after work has been scheduled on any ring. Otherwise,
|
||||
* we will ignore a hung ring if a second ring is kept busy.
|
||||
*/
|
||||
|
||||
delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
|
||||
queue_delayed_work(system_long_wq,
|
||||
&dev_priv->gpu_error.hangcheck_work, delay);
|
||||
}
|
||||
|
||||
__printf(3, 4)
|
||||
void i915_handle_error(struct drm_i915_private *dev_priv,
|
||||
u32 engine_mask,
|
||||
|
@ -2963,6 +3010,17 @@ u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
|
|||
|
||||
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
|
||||
|
||||
int intel_wait_for_register(struct drm_i915_private *dev_priv,
|
||||
i915_reg_t reg,
|
||||
const u32 mask,
|
||||
const u32 value,
|
||||
const unsigned long timeout_ms);
|
||||
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
|
||||
i915_reg_t reg,
|
||||
const u32 mask,
|
||||
const u32 value,
|
||||
const unsigned long timeout_ms);
|
||||
|
||||
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return dev_priv->gvt.initialized;
|
||||
|
@ -3027,7 +3085,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
|
|||
ibx_display_interrupt_update(dev_priv, bits, 0);
|
||||
}
|
||||
|
||||
|
||||
/* i915_gem.c */
|
||||
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
@ -3244,31 +3301,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
|
|||
return (int32_t)(seq1 - seq2) >= 0;
|
||||
}
|
||||
|
||||
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
|
||||
bool lazy_coherency)
|
||||
static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
|
||||
{
|
||||
if (!lazy_coherency && req->engine->irq_seqno_barrier)
|
||||
req->engine->irq_seqno_barrier(req->engine);
|
||||
return i915_seqno_passed(req->engine->get_seqno(req->engine),
|
||||
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
|
||||
req->previous_seqno);
|
||||
}
|
||||
|
||||
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
|
||||
bool lazy_coherency)
|
||||
static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
|
||||
{
|
||||
if (!lazy_coherency && req->engine->irq_seqno_barrier)
|
||||
req->engine->irq_seqno_barrier(req->engine);
|
||||
return i915_seqno_passed(req->engine->get_seqno(req->engine),
|
||||
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
|
||||
req->seqno);
|
||||
}
|
||||
|
||||
bool __i915_spin_request(const struct drm_i915_gem_request *request,
|
||||
int state, unsigned long timeout_us);
|
||||
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
|
||||
int state, unsigned long timeout_us)
|
||||
{
|
||||
return (i915_gem_request_started(request) &&
|
||||
__i915_spin_request(request, state, timeout_us));
|
||||
}
|
||||
|
||||
int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
|
||||
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
|
||||
|
||||
struct drm_i915_gem_request *
|
||||
i915_gem_find_active_request(struct intel_engine_cs *engine);
|
||||
|
||||
bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
|
||||
|
||||
static inline u32 i915_reset_counter(struct i915_gpu_error *error)
|
||||
|
@ -3311,18 +3371,6 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
|
|||
return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
|
||||
}
|
||||
|
||||
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return dev_priv->gpu_error.stop_rings == 0 ||
|
||||
dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
|
||||
}
|
||||
|
||||
static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return dev_priv->gpu_error.stop_rings == 0 ||
|
||||
dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
|
||||
}
|
||||
|
||||
void i915_gem_reset(struct drm_device *dev);
|
||||
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
|
||||
int __must_check i915_gem_init(struct drm_device *dev);
|
||||
|
@ -3330,7 +3378,7 @@ int i915_gem_init_engines(struct drm_device *dev);
|
|||
int __must_check i915_gem_init_hw(struct drm_device *dev);
|
||||
void i915_gem_init_swizzling(struct drm_device *dev);
|
||||
void i915_gem_cleanup_engines(struct drm_device *dev);
|
||||
int __must_check i915_gpu_idle(struct drm_device *dev);
|
||||
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
|
||||
int __must_check i915_gem_suspend(struct drm_device *dev);
|
||||
void __i915_add_request(struct drm_i915_gem_request *req,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
|
@ -3484,7 +3532,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
|
|||
{
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex);
|
||||
lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
|
||||
|
||||
ctx = idr_find(&file_priv->context_idr, id);
|
||||
if (!ctx)
|
||||
|
@ -3500,7 +3548,7 @@ static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
|
|||
|
||||
static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
|
||||
{
|
||||
lockdep_assert_held(&ctx->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
kref_put(&ctx->ref, i915_gem_context_free);
|
||||
}
|
||||
|
||||
|
@ -3576,7 +3624,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
|
|||
/* i915_gem_tiling.c */
|
||||
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
|
||||
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
|
||||
obj->tiling_mode != I915_TILING_NONE;
|
||||
|
@ -3590,12 +3638,14 @@ int i915_verify_lists(struct drm_device *dev);
|
|||
#endif
|
||||
|
||||
/* i915_debugfs.c */
|
||||
int i915_debugfs_init(struct drm_minor *minor);
|
||||
void i915_debugfs_cleanup(struct drm_minor *minor);
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
int i915_debugfs_register(struct drm_i915_private *dev_priv);
|
||||
void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
|
||||
int i915_debugfs_connector_add(struct drm_connector *connector);
|
||||
void intel_display_crc_init(struct drm_device *dev);
|
||||
#else
|
||||
static inline int i915_debugfs_register(struct drm_i915_private *) {return 0;}
|
||||
static inline void i915_debugfs_unregister(struct drm_i915_private *) {}
|
||||
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
|
||||
{ return 0; }
|
||||
static inline void intel_display_crc_init(struct drm_device *dev) {}
|
||||
|
@ -3686,8 +3736,8 @@ extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
|
|||
extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
|
||||
#else
|
||||
static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
|
||||
static inline void intel_opregion_init(struct drm_i915_private *dev) { }
|
||||
static inline void intel_opregion_fini(struct drm_i915_private *dev) { }
|
||||
static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
|
||||
static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
|
||||
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
}
|
||||
|
@ -3716,11 +3766,22 @@ static inline void intel_register_dsm_handler(void) { return; }
|
|||
static inline void intel_unregister_dsm_handler(void) { return; }
|
||||
#endif /* CONFIG_ACPI */
|
||||
|
||||
/* intel_device_info.c */
|
||||
static inline struct intel_device_info *
|
||||
mkwrite_device_info(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return (struct intel_device_info *)&dev_priv->info;
|
||||
}
|
||||
|
||||
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
|
||||
void intel_device_info_dump(struct drm_i915_private *dev_priv);
|
||||
|
||||
/* modesetting */
|
||||
extern void intel_modeset_init_hw(struct drm_device *dev);
|
||||
extern void intel_modeset_init(struct drm_device *dev);
|
||||
extern void intel_modeset_gem_init(struct drm_device *dev);
|
||||
extern void intel_modeset_cleanup(struct drm_device *dev);
|
||||
extern int intel_connector_register(struct drm_connector *);
|
||||
extern void intel_connector_unregister(struct drm_connector *);
|
||||
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
|
||||
extern void intel_display_resume(struct drm_device *dev);
|
||||
|
@ -3731,7 +3792,6 @@ extern void intel_init_pch_refclk(struct drm_device *dev);
|
|||
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
|
||||
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
|
||||
bool enable);
|
||||
extern void intel_detect_pch(struct drm_device *dev);
|
||||
|
||||
extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
|
||||
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
|
||||
|
@ -3864,6 +3924,7 @@ __raw_write(64, q)
|
|||
*/
|
||||
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
|
||||
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
|
||||
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
|
||||
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
|
||||
|
||||
/* "Broadcast RGB" property */
|
||||
|
@ -3927,12 +3988,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
|
|||
schedule_timeout_uninterruptible(remaining_jiffies);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_request *req)
|
||||
static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
|
||||
{
|
||||
if (engine->trace_irq_req == NULL && engine->irq_get(engine))
|
||||
i915_gem_request_assign(&engine->trace_irq_req, req);
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
|
||||
/* Before we do the heavier coherent read of the seqno,
|
||||
* check the value (hopefully) in the CPU cacheline.
|
||||
*/
|
||||
if (i915_gem_request_completed(req))
|
||||
return true;
|
||||
|
||||
/* Ensure our read of the seqno is coherent so that we
|
||||
* do not "miss an interrupt" (i.e. if this is the last
|
||||
* request and the seqno write from the GPU is not visible
|
||||
* by the time the interrupt fires, we will see that the
|
||||
* request is incomplete and go back to sleep awaiting
|
||||
* another interrupt that will never come.)
|
||||
*
|
||||
* Strictly, we only need to do this once after an interrupt,
|
||||
* but it is easier and safer to do it every time the waiter
|
||||
* is woken.
|
||||
*/
|
||||
if (engine->irq_seqno_barrier &&
|
||||
READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
|
||||
cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
|
||||
struct task_struct *tsk;
|
||||
|
||||
/* The ordering of irq_posted versus applying the barrier
|
||||
* is crucial. The clearing of the current irq_posted must
|
||||
* be visible before we perform the barrier operation,
|
||||
* such that if a subsequent interrupt arrives, irq_posted
|
||||
* is reasserted and our task rewoken (which causes us to
|
||||
* do another __i915_request_irq_complete() immediately
|
||||
* and reapply the barrier). Conversely, if the clear
|
||||
* occurs after the barrier, then an interrupt that arrived
|
||||
* whilst we waited on the barrier would not trigger a
|
||||
* barrier on the next pass, and the read may not see the
|
||||
* seqno update.
|
||||
*/
|
||||
engine->irq_seqno_barrier(engine);
|
||||
|
||||
/* If we consume the irq, but we are no longer the bottom-half,
|
||||
* the real bottom-half may not have serialised their own
|
||||
* seqno check with the irq-barrier (i.e. may have inspected
|
||||
* the seqno before we believe it coherent since they see
|
||||
* irq_posted == false but we are still running).
|
||||
*/
|
||||
rcu_read_lock();
|
||||
tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
|
||||
if (tsk && tsk != current)
|
||||
/* Note that if the bottom-half is changed as we
|
||||
* are sending the wake-up, the new bottom-half will
|
||||
* be woken by whomever made the change. We only have
|
||||
* to worry about when we steal the irq-posted for
|
||||
* ourself.
|
||||
*/
|
||||
wake_up_process(tsk);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (i915_gem_request_completed(req))
|
||||
return true;
|
||||
}
|
||||
|
||||
/* We need to check whether any gpu reset happened in between
|
||||
* the request being submitted and now. If a reset has occurred,
|
||||
* the seqno will have been advance past ours and our request
|
||||
* is complete. If we are in the process of handling a reset,
|
||||
* the request is effectively complete as the rendering will
|
||||
* be discarded, but we need to return in order to drop the
|
||||
* struct_mutex.
|
||||
*/
|
||||
if (i915_reset_in_progress(&req->i915->gpu_error))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -154,7 +154,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
|||
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&ctx->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
trace_i915_context_free(ctx);
|
||||
|
||||
/*
|
||||
|
@ -250,7 +250,7 @@ static struct i915_gem_context *
|
|||
__create_hw_context(struct drm_device *dev,
|
||||
struct drm_i915_file_private *file_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct i915_gem_context *ctx;
|
||||
int ret;
|
||||
|
||||
|
@ -268,6 +268,8 @@ __create_hw_context(struct drm_device *dev,
|
|||
list_add_tail(&ctx->link, &dev_priv->context_list);
|
||||
ctx->i915 = dev_priv;
|
||||
|
||||
ctx->ggtt_alignment = get_context_alignment(dev_priv);
|
||||
|
||||
if (dev_priv->hw_context_size) {
|
||||
struct drm_i915_gem_object *obj =
|
||||
i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
|
||||
|
@ -394,7 +396,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
|
|||
|
||||
void i915_gem_context_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
lockdep_assert_held(&dev->struct_mutex);
|
||||
|
||||
|
@ -410,7 +412,7 @@ void i915_gem_context_reset(struct drm_device *dev)
|
|||
|
||||
int i915_gem_context_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
/* Init should only be called once per module load. Eventually the
|
||||
|
@ -451,26 +453,6 @@ int i915_gem_context_init(struct drm_device *dev)
|
|||
return PTR_ERR(ctx);
|
||||
}
|
||||
|
||||
if (!i915.enable_execlists && ctx->engine[RCS].state) {
|
||||
int ret;
|
||||
|
||||
/* We may need to do things with the shrinker which
|
||||
* require us to immediately switch back to the default
|
||||
* context. This can cause a problem as pinning the
|
||||
* default context also requires GTT space which may not
|
||||
* be available. To avoid this we always pin the default
|
||||
* context.
|
||||
*/
|
||||
ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
|
||||
get_context_alignment(dev_priv), 0);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to pinned default global context (error %d)\n",
|
||||
ret);
|
||||
i915_gem_context_unreference(ctx);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
dev_priv->kernel_context = ctx;
|
||||
|
||||
DRM_DEBUG_DRIVER("%s context support initialized\n",
|
||||
|
@ -483,33 +465,45 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
lockdep_assert_held(&dev_priv->dev->struct_mutex);
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
if (engine->last_context) {
|
||||
i915_gem_context_unpin(engine->last_context, engine);
|
||||
engine->last_context = NULL;
|
||||
}
|
||||
|
||||
/* Force the GPU state to be reinitialised on enabling */
|
||||
dev_priv->kernel_context->engine[engine->id].initialised =
|
||||
engine->init_context == NULL;
|
||||
}
|
||||
|
||||
/* Force the GPU state to be reinitialised on enabling */
|
||||
dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
|
||||
/* Force the GPU state to be restored on enabling */
|
||||
if (!i915.enable_execlists) {
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
list_for_each_entry(ctx, &dev_priv->context_list, link) {
|
||||
if (!i915_gem_context_is_default(ctx))
|
||||
continue;
|
||||
|
||||
for_each_engine(engine, dev_priv)
|
||||
ctx->engine[engine->id].initialised = false;
|
||||
|
||||
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
|
||||
}
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
struct intel_context *kce =
|
||||
&dev_priv->kernel_context->engine[engine->id];
|
||||
|
||||
kce->initialised = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_context_fini(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct i915_gem_context *dctx = dev_priv->kernel_context;
|
||||
|
||||
lockdep_assert_held(&dev->struct_mutex);
|
||||
|
||||
if (!i915.enable_execlists && dctx->engine[RCS].state)
|
||||
i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
|
||||
|
||||
i915_gem_context_unreference(dctx);
|
||||
dev_priv->kernel_context = NULL;
|
||||
|
||||
|
@ -759,7 +753,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
|
|||
|
||||
/* Trying to pin first makes error handling easier. */
|
||||
ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
|
||||
get_context_alignment(engine->i915),
|
||||
to->ggtt_alignment,
|
||||
0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -901,7 +895,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
|
|||
struct intel_engine_cs *engine = req->engine;
|
||||
|
||||
WARN_ON(i915.enable_execlists);
|
||||
lockdep_assert_held(&req->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&req->i915->drm.struct_mutex);
|
||||
|
||||
if (!req->ctx->engine[engine->id].state) {
|
||||
struct i915_gem_context *to = req->ctx;
|
||||
|
@ -1032,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
|
|||
else
|
||||
args->value = to_i915(dev)->ggtt.base.total;
|
||||
break;
|
||||
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
|
||||
args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
|
@ -1077,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
|
|||
ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
|
||||
}
|
||||
break;
|
||||
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
|
||||
if (args->size) {
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
if (args->value)
|
||||
ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
|
||||
else
|
||||
ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
|
@ -1089,7 +1096,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
|
|||
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_reset_stats *args = data;
|
||||
struct i915_ctx_hang_stats *hs;
|
||||
struct i915_gem_context *ctx;
|
||||
|
|
|
@ -33,6 +33,37 @@
|
|||
#include "intel_drv.h"
|
||||
#include "i915_trace.h"
|
||||
|
||||
static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
if (i915.enable_execlists)
|
||||
return 0;
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
struct drm_i915_gem_request *req;
|
||||
int ret;
|
||||
|
||||
if (engine->last_context == NULL)
|
||||
continue;
|
||||
|
||||
if (engine->last_context == dev_priv->kernel_context)
|
||||
continue;
|
||||
|
||||
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
|
||||
if (IS_ERR(req))
|
||||
return PTR_ERR(req);
|
||||
|
||||
ret = i915_switch_context(req);
|
||||
i915_add_request_no_flush(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static bool
|
||||
mark_free(struct i915_vma *vma, struct list_head *unwind)
|
||||
{
|
||||
|
@ -150,11 +181,19 @@ none:
|
|||
|
||||
/* Only idle the GPU and repeat the search once */
|
||||
if (pass++ == 0) {
|
||||
ret = i915_gpu_idle(dev);
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
if (i915_is_ggtt(vm)) {
|
||||
ret = switch_to_pinned_context(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = i915_gem_wait_for_idle(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_retire_requests(to_i915(dev));
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
goto search_again;
|
||||
}
|
||||
|
||||
|
@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
|
|||
trace_i915_gem_evict_vm(vm);
|
||||
|
||||
if (do_idle) {
|
||||
ret = i915_gpu_idle(vm->dev);
|
||||
struct drm_i915_private *dev_priv = to_i915(vm->dev);
|
||||
|
||||
if (i915_is_ggtt(vm)) {
|
||||
ret = switch_to_pinned_context(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = i915_gem_wait_for_idle(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_retire_requests(to_i915(vm->dev));
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
|
||||
WARN_ON(!list_empty(&vm->active_list));
|
||||
}
|
||||
|
|
|
@ -1142,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret, i;
|
||||
|
||||
if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
|
||||
|
@ -1225,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
|||
{
|
||||
struct drm_device *dev = params->dev;
|
||||
struct intel_engine_cs *engine = params->engine;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u64 exec_start, exec_len;
|
||||
int instp_mode;
|
||||
u32 instp_mask;
|
||||
|
@ -1328,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
|
|||
/* Check whether the file_priv has already selected one ring. */
|
||||
if ((int)file_priv->bsd_ring < 0) {
|
||||
/* If not, use the ping-pong mechanism to select one. */
|
||||
mutex_lock(&dev_priv->dev->struct_mutex);
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
|
||||
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
return file_priv->bsd_ring;
|
||||
|
@ -1477,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
dispatch_flags |= I915_DISPATCH_RS;
|
||||
}
|
||||
|
||||
/* Take a local wakeref for preparing to dispatch the execbuf as
|
||||
* we expect to access the hardware fairly frequently in the
|
||||
* process. Upon first dispatch, we acquire another prolonged
|
||||
* wakeref that we hold until the GPU has been idle for at least
|
||||
* 100ms.
|
||||
*/
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
|
|
|
@ -58,7 +58,7 @@
|
|||
static void i965_write_fence_reg(struct drm_device *dev, int reg,
|
||||
struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
i915_reg_t fence_reg_lo, fence_reg_hi;
|
||||
int fence_pitch_shift;
|
||||
|
||||
|
@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
|
|||
static void i915_write_fence_reg(struct drm_device *dev, int reg,
|
||||
struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 val;
|
||||
|
||||
if (obj) {
|
||||
|
@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
|
|||
static void i830_write_fence_reg(struct drm_device *dev, int reg,
|
||||
struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
uint32_t val;
|
||||
|
||||
if (obj) {
|
||||
|
@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
|
|||
static void i915_gem_write_fence(struct drm_device *dev, int reg,
|
||||
struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
/* Ensure that all CPU reads are completed before installing a fence
|
||||
* and all writes before removing the fence.
|
||||
|
@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
|
|||
struct drm_i915_fence_reg *fence,
|
||||
bool enable)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
int reg = fence_number(dev_priv, fence);
|
||||
|
||||
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
|
||||
|
@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
|
|||
int
|
||||
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
struct drm_i915_fence_reg *fence;
|
||||
int ret;
|
||||
|
||||
|
@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
|
|||
static struct drm_i915_fence_reg *
|
||||
i915_find_fence_reg(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_fence_reg *reg, *avail;
|
||||
int i;
|
||||
|
||||
|
@ -367,7 +367,7 @@ int
|
|||
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
bool enable = obj->tiling_mode != I915_TILING_NONE;
|
||||
struct drm_i915_fence_reg *reg;
|
||||
int ret;
|
||||
|
@ -433,7 +433,7 @@ bool
|
|||
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->fence_reg != I915_FENCE_REG_NONE) {
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
|
||||
|
||||
WARN_ON(!ggtt_vma ||
|
||||
|
@ -457,7 +457,7 @@ void
|
|||
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->fence_reg != I915_FENCE_REG_NONE) {
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
|
||||
dev_priv->fence_regs[obj->fence_reg].pin_count--;
|
||||
}
|
||||
|
@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
|
|||
*/
|
||||
void i915_gem_restore_fences(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dev_priv->num_fence_regs; i++) {
|
||||
|
@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
|
|||
void
|
||||
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
|
||||
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
|
||||
|
||||
|
|
|
@ -153,7 +153,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
|
|||
#endif
|
||||
|
||||
/* Early VLV doesn't have this */
|
||||
if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
|
||||
if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
|
||||
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -1570,13 +1570,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
|
|||
struct i915_page_table *unused;
|
||||
gen6_pte_t scratch_pte;
|
||||
uint32_t pd_entry;
|
||||
uint32_t pte, pde, temp;
|
||||
uint32_t pte, pde;
|
||||
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
|
||||
|
||||
scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
|
||||
I915_CACHE_LLC, true, 0);
|
||||
|
||||
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
|
||||
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
|
||||
u32 expected;
|
||||
gen6_pte_t *pt_vaddr;
|
||||
const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
|
||||
|
@ -1640,9 +1640,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
|
|||
{
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
struct i915_page_table *pt;
|
||||
uint32_t pde, temp;
|
||||
uint32_t pde;
|
||||
|
||||
gen6_for_each_pde(pt, pd, start, length, temp, pde)
|
||||
gen6_for_each_pde(pt, pd, start, length, pde)
|
||||
gen6_write_pde(pd, pde, pt);
|
||||
|
||||
/* Make sure write is complete before other code can use this page
|
||||
|
@ -1683,17 +1683,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
|
||||
|
||||
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
|
||||
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
|
@ -1731,21 +1720,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
|||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_device *dev = ppgtt->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
struct drm_i915_private *dev_priv = req->i915;
|
||||
|
||||
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
|
||||
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
|
||||
|
||||
POSTING_READ(RING_PP_DIR_DCLV(engine));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gen8_ppgtt_enable(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
|
@ -1757,7 +1741,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
|
|||
|
||||
static void gen7_ppgtt_enable(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_engine_cs *engine;
|
||||
uint32_t ecochk, ecobits;
|
||||
|
||||
|
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1782,7 +1766,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 
 static void gen6_ppgtt_enable(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     uint32_t ecochk, gab_ctl, ecobits;
 
     ecobits = I915_READ(GAC_ECO_BITS);
@@ -1875,7 +1859,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
     struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
     struct i915_page_table *pt;
     uint32_t start, length, start_save, length_save;
-    uint32_t pde, temp;
+    uint32_t pde;
     int ret;
 
     if (WARN_ON(start_in + length_in > ppgtt->base.total))
@@ -1891,7 +1875,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
      * need allocation. The second stage marks use ptes within the page
      * tables.
      */
-    gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+    gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
         if (pt != vm->scratch_pt) {
             WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
             continue;
@@ -1916,7 +1900,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
     start = start_save;
     length = length_save;
 
-    gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+    gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
         DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
 
         bitmap_zero(tmp_bitmap, GEN6_PTES);
@@ -1985,15 +1969,16 @@ static void gen6_free_scratch(struct i915_address_space *vm)
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
     struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+    struct i915_page_directory *pd = &ppgtt->pd;
+    struct drm_device *dev = vm->dev;
     struct i915_page_table *pt;
     uint32_t pde;
 
     drm_mm_remove_node(&ppgtt->node);
 
-    gen6_for_all_pdes(pt, ppgtt, pde) {
+    gen6_for_all_pdes(pt, pd, pde)
         if (pt != vm->scratch_pt)
-            free_pt(ppgtt->base.dev, pt);
-    }
+            free_pt(dev, pt);
 
     gen6_free_scratch(vm);
 }
@@ -2059,9 +2044,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
                                   uint64_t start, uint64_t length)
 {
     struct i915_page_table *unused;
-    uint32_t pde, temp;
+    uint32_t pde;
 
-    gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+    gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
         ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
 }
 
@@ -2073,18 +2058,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
     int ret;
 
     ppgtt->base.pte_encode = ggtt->base.pte_encode;
-    if (IS_GEN6(dev)) {
+    if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
         ppgtt->switch_mm = gen6_mm_switch;
-    } else if (IS_HASWELL(dev)) {
+    else if (IS_HASWELL(dev))
         ppgtt->switch_mm = hsw_mm_switch;
-    } else if (IS_GEN7(dev)) {
+    else if (IS_GEN7(dev))
         ppgtt->switch_mm = gen7_mm_switch;
-    } else
+    else
         BUG();
 
-    if (intel_vgpu_active(dev_priv))
-        ppgtt->switch_mm = vgpu_mm_switch;
-
     ret = gen6_ppgtt_alloc(ppgtt);
     if (ret)
         return ret;
@@ -2133,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
                                     struct drm_i915_private *dev_priv)
 {
     drm_mm_init(&vm->mm, vm->start, vm->total);
-    vm->dev = dev_priv->dev;
+    vm->dev = &dev_priv->drm;
     INIT_LIST_HEAD(&vm->active_list);
     INIT_LIST_HEAD(&vm->inactive_list);
     list_add_tail(&vm->global_link, &dev_priv->vm_list);
@@ -2141,7 +2123,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 
 static void gtt_write_workarounds(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     /* This function is for gtt related workarounds. This function is
      * called on driver load and after a GPU reset, so you can place
@@ -2160,7 +2142,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
 
 static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     int ret = 0;
 
     ret = __hw_ppgtt_init(dev, ppgtt);
@@ -2261,8 +2243,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
     if (unlikely(ggtt->do_idle_maps)) {
         dev_priv->mm.interruptible = false;
-        if (i915_gpu_idle(dev_priv->dev)) {
-            DRM_ERROR("Couldn't idle GPU\n");
+        if (i915_gem_wait_for_idle(dev_priv)) {
+            DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
             /* Wait a bit, in hopes it avoids the hang */
             udelay(10);
         }
@@ -2610,7 +2592,7 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
                                      uint64_t start,
                                      enum i915_cache_level cache_level, u32 unused)
 {
-    struct drm_i915_private *dev_priv = vm->dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(vm->dev);
     unsigned int flags = (cache_level == I915_CACHE_NONE) ?
         AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
     int rpm_atomic_seq;
@@ -2628,7 +2610,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                   uint64_t length,
                                   bool unused)
 {
-    struct drm_i915_private *dev_priv = vm->dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(vm->dev);
     unsigned first_entry = start >> PAGE_SHIFT;
     unsigned num_entries = length >> PAGE_SHIFT;
     int rpm_atomic_seq;
@@ -2709,7 +2691,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
     struct drm_device *dev = vma->vm->dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct drm_i915_gem_object *obj = vma->obj;
     const uint64_t size = min_t(uint64_t,
                                 obj->base.size,
@@ -2735,7 +2717,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
     struct drm_device *dev = obj->base.dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     bool interruptible;
 
     interruptible = do_idling(dev_priv);
@@ -3137,7 +3119,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
     ggtt->base.unbind_vma = ggtt_unbind_vma;
     ggtt->base.insert_page = gen8_ggtt_insert_page;
     ggtt->base.clear_range = nop_clear_range;
-    if (!USES_FULL_PPGTT(dev_priv))
+    if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
         ggtt->base.clear_range = gen8_ggtt_clear_range;
 
     ggtt->base.insert_entries = gen8_ggtt_insert_entries;
@@ -3197,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
     struct drm_i915_private *dev_priv = to_i915(dev);
     int ret;
 
-    ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+    ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
     if (!ret) {
         DRM_ERROR("failed to set up gmch\n");
         return -EIO;
@@ -3206,7 +3188,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
     intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
                   &ggtt->mappable_base, &ggtt->mappable_end);
 
-    ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+    ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
     ggtt->base.insert_page = i915_ggtt_insert_page;
     ggtt->base.insert_entries = i915_ggtt_insert_entries;
     ggtt->base.clear_range = i915_ggtt_clear_range;
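Most of the churn in the hunks above is the mechanical dev->dev_private to to_i915(dev) conversion from the drm_device demidlayering: drm_i915_private now embeds its drm_device, so the private struct is recovered with container_of() instead of a void * back-pointer. A self-contained sketch of the idiom, with struct fields reduced to stand-ins rather than the driver's real layout:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_device {
    int irq;            /* stand-in for the real struct */
};

struct drm_i915_private {
    struct drm_device drm;  /* base object embedded, not pointed to */
    int gt_awake;           /* ...driver-private state... */
};

/* Same shape as the driver's accessor: type-safe, no void * involved. */
static inline struct drm_i915_private *to_i915(struct drm_device *dev)
{
    return container_of(dev, struct drm_i915_private, drm);
}

int main(void)
{
    struct drm_i915_private i915 = { .drm = { .irq = 42 }, .gt_awake = 1 };
    struct drm_device *dev = &i915.drm;     /* what callbacks receive */

    printf("%d\n", to_i915(dev)->gt_awake); /* back to the private */
    return 0;
}

The same embedding is what makes the dev_priv->dev back-pointer redundant: going the other way is just &dev_priv->drm, which is why those call sites change too.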
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -390,27 +390,27 @@ struct i915_hw_ppgtt {
     void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
-/* For each pde iterates over every pde between from start until start + length.
- * If start, and start+length are not perfectly divisible, the macro will round
- * down, and up as needed. The macro modifies pde, start, and length. Dev is
- * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0,
- * and length = 2G effectively iterates over every PDE in the system.
- *
- * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
  */
-#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
-    for (iter = gen6_pde_index(start); \
-         length > 0 && iter < I915_PDES ? \
-            (pt = (pd)->page_table[iter]), 1 : 0; \
-         iter++, \
-         temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
-         temp = min_t(unsigned, temp, length), \
-         start += temp, length -= temp)
+#define gen6_for_each_pde(pt, pd, start, length, iter) \
+    for (iter = gen6_pde_index(start); \
+         length > 0 && iter < I915_PDES && \
+            (pt = (pd)->page_table[iter], true); \
+         ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+            temp = min(temp - start, length); \
+            start += temp, length -= temp; }), ++iter)
 
-#define gen6_for_all_pdes(pt, ppgtt, iter) \
-    for (iter = 0; \
-         pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
-         iter++)
+#define gen6_for_all_pdes(pt, pd, iter) \
+    for (iter = 0; \
+         iter < I915_PDES && \
+            (pt = (pd)->page_table[iter], true); \
+         ++iter)
 
 static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
 {
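The macro rework reads better with a caller in view. Below is a hedged userspace mock of the new gen6_for_each_pde() — stub types, re-derived ALIGN()/MIN(), GCC statement expressions, so it compiles outside the kernel — showing that the per-step bookkeeping now happens inside the macro and the caller no longer supplies a scratch 'temp' for it to clobber:

#include <stdint.h>
#include <stdio.h>

#define I915_PDES       512
#define GEN6_PDE_SHIFT  22
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

struct i915_page_table { int id; };
struct i915_page_directory { struct i915_page_table *page_table[I915_PDES]; };

static uint32_t gen6_pde_index(uint64_t addr)
{
    return (uint32_t)(addr >> GEN6_PDE_SHIFT) & (I915_PDES - 1);
}

/* The reworked iterator: consumes start/length, never modifies 'pd'. */
#define gen6_for_each_pde(pt, pd, start, length, iter)                  \
    for (iter = gen6_pde_index(start);                                  \
         length > 0 && iter < I915_PDES &&                              \
            (pt = (pd)->page_table[iter], 1);                           \
         ({ uint64_t temp = ALIGN(start + 1, 1ull << GEN6_PDE_SHIFT);   \
            temp = MIN(temp - start, length);                           \
            start += temp; length -= temp; }), ++iter)

int main(void)
{
    static struct i915_page_directory pd;
    static struct i915_page_table pts[I915_PDES];
    struct i915_page_table *pt;
    uint64_t start = 3ull << GEN6_PDE_SHIFT;    /* begin at PDE 3 */
    uint64_t length = 5ull << GEN6_PDE_SHIFT;   /* five 4MiB chunks */
    uint32_t pde;                               /* no 'temp' needed */
    int i;

    for (i = 0; i < I915_PDES; i++) {
        pts[i].id = i;
        pd.page_table[i] = &pts[i];
    }

    gen6_for_each_pde(pt, &pd, start, length, pde)
        printf("pde %u -> pt %d\n", pde, pt->id);   /* visits 3..7 */
    return 0;
}

The design point the new comment warns about also falls out of this: because the macro assigns to start, length, and iter itself, passing anything with side effects for those parameters would evaluate the side effect repeatedly.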
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -58,7 +58,7 @@ static int render_state_init(struct render_state *so,
     if (so->rodata->batch_items * 4 > 4096)
         return -EINVAL;
 
-    so->obj = i915_gem_object_create(dev_priv->dev, 4096);
+    so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
     if (IS_ERR(so->obj))
         return PTR_ERR(so->obj);
 
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -257,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
     struct drm_i915_private *dev_priv =
         container_of(shrinker, struct drm_i915_private, mm.shrinker);
-    struct drm_device *dev = dev_priv->dev;
+    struct drm_device *dev = &dev_priv->drm;
     struct drm_i915_gem_object *obj;
     unsigned long count;
     bool unlock;
@@ -265,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
     if (!i915_gem_shrinker_lock(dev, &unlock))
         return 0;
 
+    i915_gem_retire_requests(dev_priv);
+
     count = 0;
     list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
         if (can_release_pages(obj))
@@ -286,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
     struct drm_i915_private *dev_priv =
         container_of(shrinker, struct drm_i915_private, mm.shrinker);
-    struct drm_device *dev = dev_priv->dev;
+    struct drm_device *dev = &dev_priv->drm;
     unsigned long freed;
     bool unlock;
 
@@ -321,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 {
     unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
 
-    while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+    while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
         schedule_timeout_killable(1);
         if (fatal_signal_pending(current))
             return false;
@@ -342,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
 {
     dev_priv->mm.interruptible = slu->was_interruptible;
     if (slu->unlock)
-        mutex_unlock(&dev_priv->dev->struct_mutex);
+        mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static int
@@ -408,7 +410,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
         return NOTIFY_DONE;
 
     /* Force everything onto the inactive lists */
-    ret = i915_gpu_idle(dev_priv->dev);
+    ret = i915_gem_wait_for_idle(dev_priv);
     if (ret)
         goto out;
 
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -270,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 
 void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     if (!drm_mm_initialized(&dev_priv->mm.stolen))
         return;
@@ -550,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
-    struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
     if (obj->stolen) {
         i915_gem_stolen_remove_node(dev_priv, obj->stolen);
@@ -601,7 +601,7 @@ cleanup:
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct drm_i915_gem_object *obj;
     struct drm_mm_node *stolen;
     int ret;
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                     struct drm_file *file)
 {
     struct drm_i915_gem_set_tiling *args = data;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct drm_i915_gem_object *obj;
     int ret = 0;
 
@@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
                     struct drm_file *file)
 {
     struct drm_i915_gem_get_tiling *args = data;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct drm_i915_gem_object *obj;
 
     obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -332,7 +332,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                             const struct i915_error_state_file_priv *error_priv)
 {
     struct drm_device *dev = error_priv->dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct drm_i915_error_state *error = error_priv->error;
     struct drm_i915_error_object *obj;
     int i, j, offset, elt;
@@ -463,6 +463,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
             }
         }
 
+        if (error->ring[i].num_waiters) {
+            err_printf(m, "%s --- %d waiters\n",
+                       dev_priv->engine[i].name,
+                       error->ring[i].num_waiters);
+            for (j = 0; j < error->ring[i].num_waiters; j++) {
+                err_printf(m, " seqno 0x%08x for %s [%d]\n",
+                           error->ring[i].waiters[j].seqno,
+                           error->ring[i].waiters[j].comm,
+                           error->ring[i].waiters[j].pid);
+            }
+        }
+
         if ((obj = error->ring[i].ringbuffer)) {
             err_printf(m, "%s --- ringbuffer = 0x%08x\n",
                        dev_priv->engine[i].name,
@@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                            hws_page[elt+1],
                            hws_page[elt+2],
                            hws_page[elt+3]);
-                offset += 16;
+            offset += 16;
             }
         }
 
@@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref)
         i915_error_object_free(error->ring[i].ringbuffer);
         i915_error_object_free(error->ring[i].hws_page);
         i915_error_object_free(error->ring[i].ctx);
-        kfree(error->ring[i].requests);
         i915_error_object_free(error->ring[i].wa_ctx);
+        kfree(error->ring[i].requests);
+        kfree(error->ring[i].waiters);
     }
 
     i915_error_object_free(error->semaphore_obj);
@@ -892,6 +905,48 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
     }
 }
 
+static void engine_record_waiters(struct intel_engine_cs *engine,
+                                  struct drm_i915_error_ring *ering)
+{
+    struct intel_breadcrumbs *b = &engine->breadcrumbs;
+    struct drm_i915_error_waiter *waiter;
+    struct rb_node *rb;
+    int count;
+
+    ering->num_waiters = 0;
+    ering->waiters = NULL;
+
+    spin_lock(&b->lock);
+    count = 0;
+    for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
+        count++;
+    spin_unlock(&b->lock);
+
+    waiter = NULL;
+    if (count)
+        waiter = kmalloc_array(count,
+                               sizeof(struct drm_i915_error_waiter),
+                               GFP_ATOMIC);
+    if (!waiter)
+        return;
+
+    ering->waiters = waiter;
+
+    spin_lock(&b->lock);
+    for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+        struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+        strcpy(waiter->comm, w->tsk->comm);
+        waiter->pid = w->tsk->pid;
+        waiter->seqno = w->seqno;
+        waiter++;
+
+        if (++ering->num_waiters == count)
+            break;
+    }
+    spin_unlock(&b->lock);
+}
+
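engine_record_waiters() follows a count/allocate/copy shape because error capture cannot sleep while holding the breadcrumbs spinlock: size the array under the lock, drop the lock to allocate (GFP_ATOMIC in the kernel), then re-take it and copy at most the counted entries, tolerating the list having changed in between. A generic pthread sketch of the same pattern, with illustrative types and names:

#include <pthread.h>
#include <stdlib.h>

struct waiter {
    struct waiter *next;
    int pid;
};

struct snapshot { int *pids; int n; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *waiters;  /* list mutated by other threads */

static void snapshot_waiters(struct snapshot *snap)
{
    struct waiter *w;
    int count = 0;

    snap->pids = NULL;
    snap->n = 0;

    /* Pass 1: size the copy while the list is stable. */
    pthread_mutex_lock(&lock);
    for (w = waiters; w; w = w->next)
        count++;
    pthread_mutex_unlock(&lock);

    if (!count)
        return;

    /* Allocate outside the lock (GFP_ATOMIC in the kernel analogue). */
    snap->pids = malloc(count * sizeof(*snap->pids));
    if (!snap->pids)
        return;

    /* Pass 2: copy, but never past the buffer sized in pass 1 --
     * the list may have grown while the lock was dropped. */
    pthread_mutex_lock(&lock);
    for (w = waiters; w && snap->n < count; w = w->next)
        snap->pids[snap->n++] = w->pid;
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    static struct waiter w1 = { NULL, 100 }, w2 = { &w1, 200 };
    struct snapshot s;

    waiters = &w2;
    snapshot_waiters(&s);
    free(s.pids);
    return 0;
}

If the list shrank between the passes, the copy simply ends early; if it grew, the extras are dropped. Both outcomes are acceptable for a best-effort crash snapshot.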
 static void i915_record_ring_state(struct drm_i915_private *dev_priv,
                                    struct drm_i915_error_state *error,
                                    struct intel_engine_cs *engine,
@@ -926,10 +981,10 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
         ering->instdone = I915_READ(GEN2_INSTDONE);
     }
 
-    ering->waiting = waitqueue_active(&engine->irq_queue);
+    ering->waiting = intel_engine_has_waiter(engine);
     ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
     ering->acthd = intel_ring_get_active_head(engine);
-    ering->seqno = engine->get_seqno(engine);
+    ering->seqno = intel_engine_get_seqno(engine);
     ering->last_seqno = engine->last_submitted_seqno;
     ering->start = I915_READ_START(engine);
     ering->head = I915_READ_HEAD(engine);
@@ -1022,7 +1077,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 
     for (i = 0; i < I915_NUM_ENGINES; i++) {
         struct intel_engine_cs *engine = &dev_priv->engine[i];
-        struct intel_ringbuffer *rbuf;
 
         error->ring[i].pid = -1;
 
@@ -1032,14 +1086,15 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
         error->ring[i].valid = true;
 
         i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
+        engine_record_waiters(engine, &error->ring[i]);
 
         request = i915_gem_find_active_request(engine);
         if (request) {
             struct i915_address_space *vm;
+            struct intel_ringbuffer *rb;
 
-            vm = request->ctx && request->ctx->ppgtt ?
-                &request->ctx->ppgtt->base :
-                &ggtt->base;
+            vm = request->ctx->ppgtt ?
+                &request->ctx->ppgtt->base : &ggtt->base;
 
             /* We need to copy these to an anonymous buffer
              * as the simplest method to avoid being overwritten
@@ -1066,27 +1121,18 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                 }
                 rcu_read_unlock();
             }
+
+            error->simulated |=
+                request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
+
+            rb = request->ringbuf;
+            error->ring[i].cpu_ring_head = rb->head;
+            error->ring[i].cpu_ring_tail = rb->tail;
+            error->ring[i].ringbuffer =
+                i915_error_ggtt_object_create(dev_priv,
+                                              rb->obj);
         }
 
-        if (i915.enable_execlists) {
-            /* TODO: This is only a small fix to keep basic error
-             * capture working, but we need to add more information
-             * for it to be useful (e.g. dump the context being
-             * executed).
-             */
-            if (request)
-                rbuf = request->ctx->engine[engine->id].ringbuf;
-            else
-                rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
-        } else
-            rbuf = engine->buffer;
-
-        error->ring[i].cpu_ring_head = rbuf->head;
-        error->ring[i].cpu_ring_tail = rbuf->tail;
-
-        error->ring[i].ringbuffer =
-            i915_error_ggtt_object_create(dev_priv, rbuf->obj);
 
         error->ring[i].hws_page =
             i915_error_ggtt_object_create(dev_priv,
                                           engine->status_page.obj);
@@ -1230,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
 static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
                                    struct drm_i915_error_state *error)
 {
-    struct drm_device *dev = dev_priv->dev;
+    struct drm_device *dev = &dev_priv->drm;
     int i;
 
     /* General organization
@@ -1355,6 +1401,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
     struct drm_i915_error_state *error;
     unsigned long flags;
 
+    if (READ_ONCE(dev_priv->gpu_error.first_error))
+        return;
+
     /* Account for pipe specific data like PIPE*STAT */
     error = kzalloc(sizeof(*error), GFP_ATOMIC);
     if (!error) {
@@ -1378,12 +1427,14 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
     i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
     DRM_INFO("%s\n", error->error_msg);
 
-    spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-    if (dev_priv->gpu_error.first_error == NULL) {
-        dev_priv->gpu_error.first_error = error;
-        error = NULL;
+    if (!error->simulated) {
+        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+        if (!dev_priv->gpu_error.first_error) {
+            dev_priv->gpu_error.first_error = error;
+            error = NULL;
+        }
+        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
     }
-    spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
     if (error) {
         i915_error_state_free(&error->ref);
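The capture path above is a "first error wins" slot: an unlocked READ_ONCE() early-out makes repeat reports cheap, and the slot itself is only claimed under the spinlock, so a racing capture loses cleanly and frees its own buffer. The same shape in a small userspace sketch, names illustrative:

#include <pthread.h>
#include <stdlib.h>

struct err_state { int code; };

static pthread_mutex_t err_lock = PTHREAD_MUTEX_INITIALIZER;
static struct err_state *first_error;

/* First error wins; later reports are dropped cheaply. */
static void capture_error(int code)
{
    struct err_state *e;

    /* Unlocked fast path: a claimed slot means nothing left to do
     * (the READ_ONCE() early return in the kernel version). */
    if (__atomic_load_n(&first_error, __ATOMIC_RELAXED))
        return;

    e = malloc(sizeof(*e));
    if (!e)
        return;
    e->code = code;

    /* Publish under the lock; another thread may have raced us here. */
    pthread_mutex_lock(&err_lock);
    if (!first_error) {
        first_error = e;
        e = NULL;
    }
    pthread_mutex_unlock(&err_lock);

    free(e);    /* non-NULL only if we lost the race */
}

int main(void)
{
    capture_error(-5);
    capture_error(-6);  /* dropped: slot already claimed */
    return first_error ? 0 : 1;
}

The simulated-hang carve-out in the hunk serves the same economy: test-induced errors skip the slot entirely, so they never shadow a real first error.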
@@ -1395,7 +1446,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
         DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
         DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
         DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-        DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
+        DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+                 dev_priv->drm.primary->index);
         warned = true;
     }
 }
@@ -1403,7 +1455,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
 void i915_error_state_get(struct drm_device *dev,
                           struct i915_error_state_file_priv *error_priv)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     spin_lock_irq(&dev_priv->gpu_error.lock);
     error_priv->error = dev_priv->gpu_error.first_error;
@@ -1421,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
 
 void i915_destroy_error_state(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct drm_i915_error_state *error;
 
     spin_lock_irq(&dev_priv->gpu_error.lock);
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
 
     I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
 
-    /* No HOST2GUC command should take longer than 10ms */
-    ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
+    /*
+     * Fast commands should complete in less than 10us, so sample quickly
+     * up to that length of time, then switch to a slower sleep-wait loop.
+     * No HOST2GUC command should ever take longer than 10ms.
+     */
+    ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
+    if (ret)
+        ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
     if (status != GUC2HOST_STATUS_SUCCESS) {
         /*
          * Either the GuC explicitly returned an error (which
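The new comment spells out the idea: fast GuC actions finish in about 10us, so spin briefly first and only fall back to a sleeping poll with the full 10ms budget when that fails. A rough userspace rendering of the two-stage wait — simplified clocks and sleep quanta, not the kernel's wait_for()/wait_for_us() macros:

#include <errno.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Stage 1: busy-poll COND for up to @us microseconds. */
#define wait_for_us(COND, us) ({                                \
    uint64_t __end = now_ns() + (uint64_t)(us) * 1000;          \
    int __ret = -ETIMEDOUT;                                     \
    do {                                                        \
        if (COND) { __ret = 0; break; }                         \
    } while (now_ns() < __end);                                 \
    __ret; })

/* Stage 2: sleep ~1ms between polls, for up to @ms milliseconds. */
#define wait_for(COND, ms) ({                                   \
    uint64_t __end = now_ns() + (uint64_t)(ms) * 1000000;       \
    struct timespec __nap = { 0, 1000000 };                     \
    int __ret = -ETIMEDOUT;                                     \
    do {                                                        \
        if (COND) { __ret = 0; break; }                         \
        nanosleep(&__nap, NULL);                                \
    } while (now_ns() < __end);                                 \
    __ret; })

/* Shape of the host2guc wait: spin briefly, then back off to sleeping. */
static int wait_for_response(volatile int *status)
{
    int ret = wait_for_us(*status != 0, 10);
    if (ret)
        ret = wait_for(*status != 0, 10);
    return ret;
}

int main(void)
{
    volatile int status = 1;    /* pretend the GuC already replied */
    return wait_for_response(&status);
}

The point of dropping wait_for_atomic() here is that host2guc_action() runs in normal process context, so the slow path is allowed to sleep rather than burn a CPU for up to 10ms.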
@@ -153,12 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
                                      struct i915_guc_client *client)
 {
     struct drm_i915_private *dev_priv = guc_to_i915(guc);
-    struct drm_device *dev = dev_priv->dev;
     u32 data[2];
 
     data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
     /* WaRsDisableCoarsePowerGating:skl,bxt */
-    if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
+    if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
         data[1] = 0;
     else
         /* bit 0 and 1 are for Render and Media domain separately */
@@ -582,7 +587,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  */
 int i915_guc_submit(struct drm_i915_gem_request *rq)
 {
-    unsigned int engine_id = rq->engine->guc_id;
+    unsigned int engine_id = rq->engine->id;
     struct intel_guc *guc = &rq->i915->guc;
     struct i915_guc_client *client = guc->execbuf_client;
     int b_ret;
@@ -623,7 +628,7 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
 {
     struct drm_i915_gem_object *obj;
 
-    obj = i915_gem_object_create(dev_priv->dev, size);
+    obj = i915_gem_object_create(&dev_priv->drm, size);
     if (IS_ERR(obj))
         return NULL;
 
@@ -1034,7 +1039,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
  */
 int intel_guc_suspend(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct intel_guc *guc = &dev_priv->guc;
     struct i915_gem_context *ctx;
     u32 data[3];
@@ -1060,7 +1065,7 @@ int intel_guc_suspend(struct drm_device *dev)
  */
 int intel_guc_resume(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct intel_guc *guc = &dev_priv->guc;
     struct i915_gem_context *ctx;
     u32 data[3];
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
     dev_priv->gt_irq_mask &= ~interrupt_mask;
     dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
     I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-    POSTING_READ(GTIMR);
 }
 
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
     ilk_update_gt_irq(dev_priv, mask, mask);
+    POSTING_READ_FW(GTIMR);
 }
 
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -351,9 +351,8 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
     spin_lock_irq(&dev_priv->irq_lock);
-
-    WARN_ON(dev_priv->rps.pm_iir);
-    WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+    WARN_ON_ONCE(dev_priv->rps.pm_iir);
+    WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
     dev_priv->rps.interrupts_enabled = true;
     I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
                dev_priv->pm_rps_events);
@@ -371,11 +370,6 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
     spin_lock_irq(&dev_priv->irq_lock);
     dev_priv->rps.interrupts_enabled = false;
-    spin_unlock_irq(&dev_priv->irq_lock);
-
-    cancel_work_sync(&dev_priv->rps.work);
-
-    spin_lock_irq(&dev_priv->irq_lock);
 
     I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 
@@ -384,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
                           ~dev_priv->pm_rps_events);
 
     spin_unlock_irq(&dev_priv->irq_lock);
-    synchronize_irq(dev_priv->dev->irq);
+    synchronize_irq(dev_priv->drm.irq);
+
+    /* Now that we will not be generating any more work, flush any
+     * outstanding tasks. As we are called on the RPS idle path,
+     * we will reset the GPU to minimum frequencies, so the current
+     * state of the worker can be discarded.
+     */
+    cancel_work_sync(&dev_priv->rps.work);
     gen6_reset_rps_interrupts(dev_priv);
 }
 
 /**
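The reordering above is the usual teardown sequence for an interrupt-driven worker: clear the enabled flag under the lock so the handler stops queueing, synchronize_irq() so handlers already in flight drain out, and only then cancel_work_sync(), so nothing can re-arm the work behind the cancel. A toy C11 rendering of that ordering (the synchronize step is only commented, not modeled):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool rps_enabled;     /* rps.interrupts_enabled */
static atomic_int  rps_queued;      /* stands in for the work item */

/* IRQ-handler analogue: may run concurrently with the teardown. */
static void rps_irq_handler(void)
{
    if (atomic_load(&rps_enabled))
        atomic_fetch_add(&rps_queued, 1);   /* schedule_work() */
}

static void disable_rps_interrupts(void)
{
    /* 1. Stop the handler from queueing any new work. */
    atomic_store(&rps_enabled, false);

    /* 2. Wait out handlers already past the check -- this is
     *    synchronize_irq() in the kernel, not modeled in this toy. */

    /* 3. Only now flush/cancel: nothing can re-arm the work. */
    atomic_store(&rps_queued, 0);           /* cancel_work_sync() */
}

int main(void)
{
    atomic_store(&rps_enabled, true);
    rps_irq_handler();
    disable_rps_interrupts();
    return atomic_load(&rps_queued);        /* 0: fully quiesced */
}

The old code cancelled the work before masking the interrupt, so an IRQ landing in that window could re-queue the worker after the cancel; the new ordering closes the window.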
@@ -565,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
     u32 enable_mask;
 
     if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-        enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+        enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                    status_mask);
     else
         enable_mask = status_mask << 16;
@@ -579,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
     u32 enable_mask;
 
     if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-        enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+        enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                    status_mask);
     else
         enable_mask = status_mask << 16;
@@ -666,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  */
 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     i915_reg_t high_frame, low_frame;
     u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
     struct intel_crtc *intel_crtc =
@@ -713,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 
 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
 }
@@ -722,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
     struct drm_device *dev = crtc->base.dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     const struct drm_display_mode *mode = &crtc->base.hwmode;
     enum pipe pipe = crtc->pipe;
     int position, vtotal;
@@ -774,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                     ktime_t *stime, ktime_t *etime,
                                     const struct drm_display_mode *mode)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
     struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
     int position;
@@ -895,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 
 int intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
-    struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
     unsigned long irqflags;
     int position;
 
@@ -976,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-    if (!intel_engine_initialized(engine))
-        return;
-
-    trace_i915_gem_request_notify(engine);
-    engine->user_interrupts++;
-
-    wake_up_all(&engine->irq_queue);
+    smp_store_mb(engine->breadcrumbs.irq_posted, true);
+    if (intel_engine_wakeup(engine)) {
+        trace_i915_gem_request_notify(engine);
+        engine->breadcrumbs.irq_wakeups++;
+    }
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
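notify_ring() now publishes the interrupt with smp_store_mb() — a store followed by a full barrier — before probing for a waiter, while the wait side advertises itself before re-checking the flag. Either the waker sees the waiter or the waiter sees the flag, so the lost-wakeup window is closed and only the single bottom-half waiter is woken instead of the whole herd. A compact C11 sketch of the handshake, with seq_cst stores standing in for the kernel barriers and the actual wakeup call elided:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool irq_posted;      /* breadcrumbs.irq_posted */
static atomic_bool waiter_active;   /* "someone is first in the rbtree" */

/* Interrupt side: publish first, then look for a waiter to wake. */
static void notify(void)
{
    atomic_store(&irq_posted, true);    /* smp_store_mb() analogue */
    if (atomic_load(&waiter_active)) {
        /* wake_up_process()/intel_engine_wakeup() would go here */
    }
}

/* Wait side: advertise first, then re-check before really sleeping. */
static bool must_sleep(void)
{
    atomic_store(&waiter_active, true);
    return !atomic_load(&irq_posted);
}

int main(void)
{
    notify();
    return must_sleep() ? 1 : 0;    /* flag already posted: no sleep */
}

With seq_cst ordering the two stores cannot both be invisible to the other side, which is exactly the property the store-mb/check pairing buys in the driver.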
@@ -1063,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv)
     struct intel_engine_cs *engine;
 
     for_each_engine(engine, dev_priv)
-        if (engine->irq_refcount)
+        if (intel_engine_has_waiter(engine))
             return true;
 
     return false;
@@ -1084,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
         return;
     }
 
-    /*
-     * The RPS work is synced during runtime suspend, we don't require a
-     * wakeref. TODO: instead of disabling the asserts make sure that we
-     * always hold an RPM reference while the work is running.
-     */
-    DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-
     pm_iir = dev_priv->rps.pm_iir;
     dev_priv->rps.pm_iir = 0;
     /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1103,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
     WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
 
     if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
-        goto out;
+        return;
 
     mutex_lock(&dev_priv->rps.hw_lock);
 
@@ -1158,8 +1150,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
     intel_set_rps(dev_priv, new_delay);
 
     mutex_unlock(&dev_priv->rps.hw_lock);
-out:
-    ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 }
 
 
@@ -1185,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
     * In order to prevent a get/put style interface, acquire struct mutex
     * any time we access those registers.
     */
-    mutex_lock(&dev_priv->dev->struct_mutex);
+    mutex_lock(&dev_priv->drm.struct_mutex);
 
     /* If we've screwed up tracking, just let the interrupt fire again */
     if (WARN_ON(!dev_priv->l3_parity.which_slice))
@@ -1221,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
         parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
         parity_event[5] = NULL;
 
-        kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+        kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                            KOBJ_CHANGE, parity_event);
 
         DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1241,7 +1231,7 @@ out:
     gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
     spin_unlock_irq(&dev_priv->irq_lock);
 
-    mutex_unlock(&dev_priv->dev->struct_mutex);
+    mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -1267,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv
 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
                                u32 gt_iir)
 {
-    if (gt_iir &
-        (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+    if (gt_iir & GT_RENDER_USER_INTERRUPT)
         notify_ring(&dev_priv->engine[RCS]);
     if (gt_iir & ILK_BSD_USER_INTERRUPT)
         notify_ring(&dev_priv->engine[VCS]);
@@ -1277,9 +1266,7 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
                                u32 gt_iir)
 {
-
-    if (gt_iir &
-        (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+    if (gt_iir & GT_RENDER_USER_INTERRUPT)
         notify_ring(&dev_priv->engine[RCS]);
     if (gt_iir & GT_BSD_USER_INTERRUPT)
         notify_ring(&dev_priv->engine[VCS]);
@@ -1526,7 +1513,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 
     entry = &pipe_crc->entries[head];
 
-    entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
+    entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
                                                              pipe);
     entry->crc[0] = crc0;
     entry->crc[1] = crc1;
@@ -1602,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
         gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
         if (dev_priv->rps.interrupts_enabled) {
             dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-            queue_work(dev_priv->wq, &dev_priv->rps.work);
+            schedule_work(&dev_priv->rps.work);
         }
         spin_unlock(&dev_priv->irq_lock);
     }
@@ -1624,7 +1611,7 @@ static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
 {
     bool ret;
 
-    ret = drm_handle_vblank(dev_priv->dev, pipe);
+    ret = drm_handle_vblank(&dev_priv->drm, pipe);
     if (ret)
         intel_finish_page_flip_mmio(dev_priv, pipe);
 
@@ -1757,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
     struct drm_device *dev = arg;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     irqreturn_t ret = IRQ_NONE;
 
     if (!intel_irqs_enabled(dev_priv))
@@ -1840,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 {
     struct drm_device *dev = arg;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     irqreturn_t ret = IRQ_NONE;
 
     if (!intel_irqs_enabled(dev_priv))
@@ -2225,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
     struct drm_device *dev = arg;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 de_iir, gt_iir, de_ier, sde_ier = 0;
     irqreturn_t ret = IRQ_NONE;
 
@@ -2438,7 +2425,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
             I915_WRITE(SDEIIR, iir);
             ret = IRQ_HANDLED;
 
-            if (HAS_PCH_SPT(dev_priv))
+            if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
                 spt_irq_handler(dev_priv, iir);
             else
                 cpt_irq_handler(dev_priv, iir);
@@ -2457,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
     struct drm_device *dev = arg;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 master_ctl;
     u32 gt_iir[4] = {};
     irqreturn_t ret;
@@ -2488,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
     return ret;
 }
 
-static void i915_error_wake_up(struct drm_i915_private *dev_priv,
-                               bool reset_completed)
+static void i915_error_wake_up(struct drm_i915_private *dev_priv)
 {
-    struct intel_engine_cs *engine;
-
     /*
      * Notify all waiters for GPU completion events that reset state has
      * been changed, and that they need to restart their wait after
@@ -2501,18 +2485,10 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
      */
 
     /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-    for_each_engine(engine, dev_priv)
-        wake_up_all(&engine->irq_queue);
+    wake_up_all(&dev_priv->gpu_error.wait_queue);
 
     /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
     wake_up_all(&dev_priv->pending_flip_queue);
-
-    /*
-     * Signal tasks blocked in i915_gem_wait_for_error that the pending
-     * reset state is cleared.
-     */
-    if (reset_completed)
-        wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
 /**
@@ -2524,7 +2500,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  */
 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 {
-    struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
+    struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
     char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
     char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
     char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2577,7 +2553,7 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
          * Note: The wake_up also serves as a memory barrier so that
          * waiters see the updated value of the reset counter atomic_t.
          */
-        i915_error_wake_up(dev_priv, true);
+        wake_up_all(&dev_priv->gpu_error.reset_queue);
     }
 }
 
@@ -2714,7 +2690,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
          * ensure that the waiters see the updated value of the reset
          * counter atomic_t.
          */
-        i915_error_wake_up(dev_priv, false);
+        i915_error_wake_up(dev_priv);
     }
 
     i915_reset_and_wakeup(dev_priv);
@@ -2725,7 +2701,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
  */
 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     unsigned long irqflags;
 
     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2742,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     unsigned long irqflags;
     uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                  DE_PIPE_VBLANK(pipe);
@@ -2756,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     unsigned long irqflags;
 
     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2769,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     unsigned long irqflags;
 
     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2784,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
  */
 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     unsigned long irqflags;
 
     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2796,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     unsigned long irqflags;
     uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                  DE_PIPE_VBLANK(pipe);
@@ -2808,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     unsigned long irqflags;
 
     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2819,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     unsigned long irqflags;
 
     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2835,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static bool
-ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
 {
-    if (INTEL_GEN(dev_priv) >= 8) {
+    if (INTEL_GEN(engine->i915) >= 8) {
         return (ipehr >> 23) == 0x1c;
     } else {
         ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2908,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
         return NULL;
 
     ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-    if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
+    if (!ipehr_is_semaphore_wait(engine, ipehr))
         return NULL;
 
     /*
@@ -2966,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
     if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
         return -1;
 
-    if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
+    if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
         return 1;
 
     /* cursory check for an unkickable deadlock */
@@ -3078,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
     return HANGCHECK_HUNG;
 }
 
-static unsigned kick_waiters(struct intel_engine_cs *engine)
+static unsigned long kick_waiters(struct intel_engine_cs *engine)
 {
     struct drm_i915_private *i915 = engine->i915;
-    unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+    unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
 
-    if (engine->hangcheck.user_interrupts == user_interrupts &&
+    if (engine->hangcheck.user_interrupts == irq_count &&
         !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
-        if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+        if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
             DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
                       engine->name);
         else
             DRM_INFO("Fake missed irq on %s\n",
                      engine->name);
-        wake_up_all(&engine->irq_queue);
+
+        intel_engine_enable_fake_irq(engine);
     }
 
-    return user_interrupts;
+    return irq_count;
 }
 /*
  * This is called when the chip hasn't reported back with completed
@@ -3110,9 +3084,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
         container_of(work, typeof(*dev_priv),
                      gpu_error.hangcheck_work.work);
     struct intel_engine_cs *engine;
-    enum intel_engine_id id;
-    int busy_count = 0, rings_hung = 0;
-    bool stuck[I915_NUM_ENGINES] = { 0 };
+    unsigned int hung = 0, stuck = 0;
+    int busy_count = 0;
 #define BUSY 1
 #define KICK 5
 #define HUNG 20
@@ -3121,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
     if (!i915.enable_hangcheck)
         return;
 
-    /*
-     * The hangcheck work is synced during runtime suspend, we don't
-     * require a wakeref. TODO: instead of disabling the asserts make
-     * sure that we hold a reference when this work is running.
-     */
-    DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
+    if (!READ_ONCE(dev_priv->gt.awake))
+        return;
 
     /* As enabling the GPU requires fairly extensive mmio access,
      * periodically arm the mmio checker to see if we are triggering
@@ -3134,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
      */
     intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
-    for_each_engine_id(engine, dev_priv, id) {
+    for_each_engine(engine, dev_priv) {
+        bool busy = intel_engine_has_waiter(engine);
         u64 acthd;
         u32 seqno;
         unsigned user_interrupts;
-        bool busy = true;
 
         semaphore_clear_deadlocks(dev_priv);
 
@@ -3153,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
             engine->irq_seqno_barrier(engine);
 
         acthd = intel_ring_get_active_head(engine);
-        seqno = engine->get_seqno(engine);
+        seqno = intel_engine_get_seqno(engine);
 
         /* Reset stuck interrupts between batch advances */
         user_interrupts = 0;
@@ -3161,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
         if (engine->hangcheck.seqno == seqno) {
             if (ring_idle(engine, seqno)) {
                 engine->hangcheck.action = HANGCHECK_IDLE;
-                if (waitqueue_active(&engine->irq_queue)) {
+                if (busy) {
                     /* Safeguard against driver failure */
                     user_interrupts = kick_waiters(engine);
                     engine->hangcheck.score += BUSY;
-                } else
-                    busy = false;
+                }
             } else {
                 /* We always increment the hangcheck score
                  * if the ring is busy and still processing
@@ -3198,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                     break;
                 case HANGCHECK_HUNG:
                     engine->hangcheck.score += HUNG;
-                    stuck[id] = true;
                     break;
                 }
             }
+
+            if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+                hung |= intel_engine_flag(engine);
+                if (engine->hangcheck.action != HANGCHECK_HUNG)
+                    stuck |= intel_engine_flag(engine);
+            }
         } else {
             engine->hangcheck.action = HANGCHECK_ACTIVE;
 
@@ -3226,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
         busy_count += busy;
     }
 
-    for_each_engine_id(engine, dev_priv, id) {
-        if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
-            DRM_INFO("%s on %s\n",
-                     stuck[id] ? "stuck" : "no progress",
-                     engine->name);
-            rings_hung |= intel_engine_flag(engine);
-        }
-    }
-
-    if (rings_hung) {
-        i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
-        goto out;
+    if (hung) {
+        char msg[80];
+        int len;
+
+        /* If some rings hung but others were still busy, only
+         * blame the hanging rings in the synopsis.
+         */
+        if (stuck != hung)
+            hung &= ~stuck;
+        len = scnprintf(msg, sizeof(msg),
+                        "%s on ", stuck == hung ? "No progress" : "Hang");
+        for_each_engine_masked(engine, dev_priv, hung)
+            len += scnprintf(msg + len, sizeof(msg) - len,
+                             "%s, ", engine->name);
+        msg[len-2] = '\0';
+
+        return i915_handle_error(dev_priv, hung, msg);
     }
 
+    /* Reset timer in case GPU hangs without another request being added */
     if (busy_count)
-        /* Reset timer case chip hangs without another request
-         * being added */
         i915_queue_hangcheck(dev_priv);
-
-out:
-    ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 }
 
 void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
 {
     struct i915_gpu_error *e = &dev_priv->gpu_error;
 
     if (!i915.enable_hangcheck)
        return;
 
     /* Don't continually defer the hangcheck so that it is always run at
      * least once after work has been scheduled on any ring. Otherwise,
      * we will ignore a hung ring if a second ring is kept busy.
      */
 
     queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
                        round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
 }
 
 static void ibx_irq_reset(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     if (HAS_PCH_NOP(dev))
         return;
@@ -3288,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev)
  */
 static void ibx_irq_pre_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     if (HAS_PCH_NOP(dev))
         return;
@@ -3300,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
 
 static void gen5_gt_irq_reset(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     GEN5_IRQ_RESET(GT);
     if (INTEL_INFO(dev)->gen >= 6)
@@ -3360,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
  */
 static void ironlake_irq_reset(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     I915_WRITE(HWSTAM, 0xffffffff);
 
@@ -3375,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
 
 static void valleyview_irq_preinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     I915_WRITE(VLV_MASTER_IER, 0);
     POSTING_READ(VLV_MASTER_IER);
@@ -3398,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
 
 static void gen8_irq_reset(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     int pipe;
 
     I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3444,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
     spin_unlock_irq(&dev_priv->irq_lock);
 
     /* make sure we're done processing display irqs */
-    synchronize_irq(dev_priv->dev->irq);
+    synchronize_irq(dev_priv->drm.irq);
 }
 
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     I915_WRITE(GEN8_MASTER_IRQ, 0);
     POSTING_READ(GEN8_MASTER_IRQ);
@@ -3470,7 +3428,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
     struct intel_encoder *encoder;
     u32 enabled_irqs = 0;
 
-    for_each_intel_encoder(dev_priv->dev, encoder)
+    for_each_intel_encoder(&dev_priv->drm, encoder)
         if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
             enabled_irqs |= hpd[encoder->hpd_pin];
 
@@ -3601,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
 static void ibx_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 mask;
 
     if (HAS_PCH_NOP(dev))
@@ -3618,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 
 static void gen5_gt_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 pm_irqs, gt_irqs;
 
     pm_irqs = gt_irqs = 0;
@@ -3632,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 
     gt_irqs |= GT_RENDER_USER_INTERRUPT;
     if (IS_GEN5(dev)) {
-        gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
-                   ILK_BSD_USER_INTERRUPT;
+        gt_irqs |= ILK_BSD_USER_INTERRUPT;
     } else {
         gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
     }
@@ -3655,7 +3612,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 display_mask, extra_mask;
 
     if (INTEL_INFO(dev)->gen >= 7) {
@@ -3734,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     gen5_gt_irq_postinstall(dev);
 
@@ -3827,7 +3784,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 
 static int gen8_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     if (HAS_PCH_SPLIT(dev))
         ibx_irq_pre_postinstall(dev);
@@ -3846,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
 
 static int cherryview_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     gen8_gt_irq_postinstall(dev_priv);
 
@@ -3863,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
 
 static void gen8_irq_uninstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     if (!dev_priv)
         return;
@@ -3873,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
 
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     if (!dev_priv)
         return;
@@ -3893,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 
 static void cherryview_irq_uninstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     if (!dev_priv)
         return;
@@ -3913,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
 
 static void ironlake_irq_uninstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     if (!dev_priv)
         return;
@@ -3923,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 
 static void i8xx_irq_preinstall(struct drm_device * dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     int pipe;
 
     for_each_pipe(dev_priv, pipe)
@@ -3935,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
 
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
 
     I915_WRITE16(EMR,
                  ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3998,7 +3955,7 @@ check_page_flip:
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
     struct drm_device *dev = arg;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u16 iir, new_iir;
     u32 pipe_stats[2];
     int pipe;
@@ -4075,7 +4032,7 @@ out:
 
 static void i8xx_irq_uninstall(struct drm_device * dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
    int pipe;
 
     for_each_pipe(dev_priv, pipe) {
@@ -4090,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
 
 static void i915_irq_preinstall(struct drm_device * dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     int pipe;
 
     if (I915_HAS_HOTPLUG(dev)) {
@@ -4108,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
 
 static int i915_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 enable_mask;
 
     I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4187,7 +4144,7 @@ check_page_flip:
 static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
     struct drm_device *dev = arg;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
     u32 flip_mask =
         I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4292,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
 static void i915_irq_uninstall(struct drm_device * dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     int pipe;
 
     if (I915_HAS_HOTPLUG(dev)) {
@@ -4314,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
 
 static void i965_irq_preinstall(struct drm_device * dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     int pipe;
 
     i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4330,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
 
 static int i965_irq_postinstall(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 enable_mask;
     u32 error_mask;
 
@@ -4414,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
 static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
     struct drm_device *dev = arg;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = to_i915(dev);
     u32 iir, new_iir;
     u32 pipe_stats[I915_MAX_PIPES];
     int ret = IRQ_NONE, pipe;
@@ -4523,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
|||
|
||||
static void i965_irq_uninstall(struct drm_device * dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int pipe;
|
||||
|
||||
if (!dev_priv)
|
||||
|
@ -4553,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
|
|||
*/
|
||||
void intel_irq_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
intel_hpd_init_work(dev_priv);
|
||||
|
||||
|
@ -4631,7 +4588,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
|||
dev->driver->disable_vblank = gen8_disable_vblank;
|
||||
if (IS_BROXTON(dev))
|
||||
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
|
||||
else if (HAS_PCH_SPT(dev))
|
||||
else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
|
||||
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
|
||||
else
|
||||
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
|
||||
|
@ -4687,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
dev_priv->pm.irqs_enabled = true;
|
||||
|
||||
return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
|
||||
return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4699,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
drm_irq_uninstall(dev_priv->dev);
|
||||
drm_irq_uninstall(&dev_priv->drm);
|
||||
intel_hpd_cancel_work(dev_priv);
|
||||
dev_priv->pm.irqs_enabled = false;
|
||||
}
|
||||
|
@ -4713,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
|
||||
dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
|
||||
dev_priv->pm.irqs_enabled = false;
|
||||
synchronize_irq(dev_priv->dev->irq);
|
||||
synchronize_irq(dev_priv->drm.irq);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4728,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
|
|||
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
dev_priv->pm.irqs_enabled = true;
|
||||
dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
|
||||
dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
|
||||
dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
|
||||
dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
|
||||
}
|
||||
|
|
|
@ -224,6 +224,6 @@ module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600
|
|||
MODULE_PARM_DESC(enable_dpcd_backlight,
|
||||
"Enable support for DPCD backlight control (default:false)");
|
||||
|
||||
module_param_named(enable_gvt, i915.enable_gvt, bool, 0600);
|
||||
module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
|
||||
MODULE_PARM_DESC(enable_gvt,
|
||||
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
|
||||
|
|
|
@ -0,0 +1,503 @@
|
|||
/*
|
||||
* Copyright © 2016 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/console.h>
|
||||
#include <linux/vgaarb.h>
|
||||
#include <linux/vga_switcheroo.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define GEN_DEFAULT_PIPEOFFSETS \
|
||||
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
|
||||
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
|
||||
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
|
||||
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
|
||||
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
|
||||
|
||||
#define GEN_CHV_PIPEOFFSETS \
|
||||
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
|
||||
CHV_PIPE_C_OFFSET }, \
|
||||
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
|
||||
CHV_TRANSCODER_C_OFFSET, }, \
|
||||
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
|
||||
CHV_PALETTE_C_OFFSET }
|
||||
|
||||
#define CURSOR_OFFSETS \
|
||||
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
|
||||
|
||||
#define IVB_CURSOR_OFFSETS \
|
||||
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
|
||||
|
||||
#define BDW_COLORS \
|
||||
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
|
||||
#define CHV_COLORS \
|
||||
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
|
||||
|
||||
static const struct intel_device_info intel_i830_info = {
|
||||
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_845g_info = {
|
||||
.gen = 2, .num_pipes = 1,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i85x_info = {
|
||||
.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
|
||||
.cursor_needs_physical = 1,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
.has_fbc = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i865g_info = {
|
||||
.gen = 2, .num_pipes = 1,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i915g_info = {
|
||||
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
static const struct intel_device_info intel_i915gm_info = {
|
||||
.gen = 3, .is_mobile = 1, .num_pipes = 2,
|
||||
.cursor_needs_physical = 1,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
.supports_tv = 1,
|
||||
.has_fbc = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
static const struct intel_device_info intel_i945g_info = {
|
||||
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
static const struct intel_device_info intel_i945gm_info = {
|
||||
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
|
||||
.has_hotplug = 1, .cursor_needs_physical = 1,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
.supports_tv = 1,
|
||||
.has_fbc = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i965g_info = {
|
||||
.gen = 4, .is_broadwater = 1, .num_pipes = 2,
|
||||
.has_hotplug = 1,
|
||||
.has_overlay = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i965gm_info = {
|
||||
.gen = 4, .is_crestline = 1, .num_pipes = 2,
|
||||
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
|
||||
.has_overlay = 1,
|
||||
.supports_tv = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_g33_info = {
|
||||
.gen = 3, .is_g33 = 1, .num_pipes = 2,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.has_overlay = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_g45_info = {
|
||||
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
|
||||
.has_pipe_cxsr = 1, .has_hotplug = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_gm45_info = {
|
||||
.gen = 4, .is_g4x = 1, .num_pipes = 2,
|
||||
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
|
||||
.has_pipe_cxsr = 1, .has_hotplug = 1,
|
||||
.supports_tv = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_pineview_info = {
|
||||
.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.has_overlay = 1,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ironlake_d_info = {
|
||||
.gen = 5, .num_pipes = 2,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ironlake_m_info = {
|
||||
.gen = 5, .is_mobile = 1, .num_pipes = 2,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.has_fbc = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_sandybridge_d_info = {
|
||||
.gen = 6, .num_pipes = 2,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.has_fbc = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
|
||||
.has_llc = 1,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_sandybridge_m_info = {
|
||||
.gen = 6, .is_mobile = 1, .num_pipes = 2,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.has_fbc = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
|
||||
.has_llc = 1,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
||||
#define GEN7_FEATURES \
|
||||
.gen = 7, .num_pipes = 3, \
|
||||
.need_gfx_hws = 1, .has_hotplug = 1, \
|
||||
.has_fbc = 1, \
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
|
||||
.has_llc = 1, \
|
||||
GEN_DEFAULT_PIPEOFFSETS, \
|
||||
IVB_CURSOR_OFFSETS
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_d_info = {
|
||||
GEN7_FEATURES,
|
||||
.is_ivybridge = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_m_info = {
|
||||
GEN7_FEATURES,
|
||||
.is_ivybridge = 1,
|
||||
.is_mobile = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_q_info = {
|
||||
GEN7_FEATURES,
|
||||
.is_ivybridge = 1,
|
||||
.num_pipes = 0, /* legal, last one wins */
|
||||
};
|
||||
|
||||
#define VLV_FEATURES \
|
||||
.gen = 7, .num_pipes = 2, \
|
||||
.need_gfx_hws = 1, .has_hotplug = 1, \
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
|
||||
.display_mmio_offset = VLV_DISPLAY_BASE, \
|
||||
GEN_DEFAULT_PIPEOFFSETS, \
|
||||
CURSOR_OFFSETS
|
||||
|
||||
static const struct intel_device_info intel_valleyview_m_info = {
|
||||
VLV_FEATURES,
|
||||
.is_valleyview = 1,
|
||||
.is_mobile = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_valleyview_d_info = {
|
||||
VLV_FEATURES,
|
||||
.is_valleyview = 1,
|
||||
};
|
||||
|
||||
#define HSW_FEATURES \
|
||||
GEN7_FEATURES, \
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
|
||||
.has_ddi = 1, \
|
||||
.has_fpga_dbg = 1
|
||||
|
||||
static const struct intel_device_info intel_haswell_d_info = {
|
||||
HSW_FEATURES,
|
||||
.is_haswell = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_haswell_m_info = {
|
||||
HSW_FEATURES,
|
||||
.is_haswell = 1,
|
||||
.is_mobile = 1,
|
||||
};
|
||||
|
||||
#define BDW_FEATURES \
|
||||
HSW_FEATURES, \
|
||||
BDW_COLORS
|
||||
|
||||
static const struct intel_device_info intel_broadwell_d_info = {
|
||||
BDW_FEATURES,
|
||||
.gen = 8,
|
||||
.is_broadwell = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broadwell_m_info = {
|
||||
BDW_FEATURES,
|
||||
.gen = 8, .is_mobile = 1,
|
||||
.is_broadwell = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broadwell_gt3d_info = {
|
||||
BDW_FEATURES,
|
||||
.gen = 8,
|
||||
.is_broadwell = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broadwell_gt3m_info = {
|
||||
BDW_FEATURES,
|
||||
.gen = 8, .is_mobile = 1,
|
||||
.is_broadwell = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_cherryview_info = {
|
||||
.gen = 8, .num_pipes = 3,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
||||
.is_cherryview = 1,
|
||||
.display_mmio_offset = VLV_DISPLAY_BASE,
|
||||
GEN_CHV_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
CHV_COLORS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_skylake_info = {
|
||||
BDW_FEATURES,
|
||||
.is_skylake = 1,
|
||||
.gen = 9,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_skylake_gt3_info = {
|
||||
BDW_FEATURES,
|
||||
.is_skylake = 1,
|
||||
.gen = 9,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broxton_info = {
|
||||
.is_broxton = 1,
|
||||
.gen = 9,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
||||
.num_pipes = 3,
|
||||
.has_ddi = 1,
|
||||
.has_fpga_dbg = 1,
|
||||
.has_fbc = 1,
|
||||
.has_pooled_eu = 0,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
IVB_CURSOR_OFFSETS,
|
||||
BDW_COLORS,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_kabylake_info = {
|
||||
BDW_FEATURES,
|
||||
.is_kabylake = 1,
|
||||
.gen = 9,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_kabylake_gt3_info = {
|
||||
BDW_FEATURES,
|
||||
.is_kabylake = 1,
|
||||
.gen = 9,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||
};
|
||||
|
||||
/*
|
||||
* Make sure any device matches here are from most specific to most
|
||||
* general. For example, since the Quanta match is based on the subsystem
|
||||
* and subvendor IDs, we need it to come before the more general IVB
|
||||
* PCI ID matches, otherwise we'll use the wrong info struct above.
|
||||
*/
|
||||
static const struct pci_device_id pciidlist[] = {
|
||||
INTEL_I830_IDS(&intel_i830_info),
|
||||
INTEL_I845G_IDS(&intel_845g_info),
|
||||
INTEL_I85X_IDS(&intel_i85x_info),
|
||||
INTEL_I865G_IDS(&intel_i865g_info),
|
||||
INTEL_I915G_IDS(&intel_i915g_info),
|
||||
INTEL_I915GM_IDS(&intel_i915gm_info),
|
||||
INTEL_I945G_IDS(&intel_i945g_info),
|
||||
INTEL_I945GM_IDS(&intel_i945gm_info),
|
||||
INTEL_I965G_IDS(&intel_i965g_info),
|
||||
INTEL_G33_IDS(&intel_g33_info),
|
||||
INTEL_I965GM_IDS(&intel_i965gm_info),
|
||||
INTEL_GM45_IDS(&intel_gm45_info),
|
||||
INTEL_G45_IDS(&intel_g45_info),
|
||||
INTEL_PINEVIEW_IDS(&intel_pineview_info),
|
||||
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
|
||||
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
|
||||
INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
|
||||
INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
|
||||
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
|
||||
INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
|
||||
INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
|
||||
INTEL_HSW_D_IDS(&intel_haswell_d_info),
|
||||
INTEL_HSW_M_IDS(&intel_haswell_m_info),
|
||||
INTEL_VLV_M_IDS(&intel_valleyview_m_info),
|
||||
INTEL_VLV_D_IDS(&intel_valleyview_d_info),
|
||||
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
|
||||
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
|
||||
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
|
||||
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
|
||||
INTEL_CHV_IDS(&intel_cherryview_info),
|
||||
INTEL_SKL_GT1_IDS(&intel_skylake_info),
|
||||
INTEL_SKL_GT2_IDS(&intel_skylake_info),
|
||||
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
|
||||
INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
|
||||
INTEL_BXT_IDS(&intel_broxton_info),
|
||||
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
|
||||
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
|
||||
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
|
||||
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
|
||||
{0, 0, 0}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, pciidlist);
|
||||
|
||||
extern int i915_driver_load(struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent);
|
||||
|
||||
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
{
|
||||
struct intel_device_info *intel_info =
|
||||
(struct intel_device_info *) ent->driver_data;
|
||||
|
||||
if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
|
||||
DRM_INFO("This hardware requires preliminary hardware support.\n"
|
||||
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Only bind to function 0 of the device. Early generations
|
||||
* used function 1 as a placeholder for multi-head. This causes
|
||||
* us confusion instead, especially on the systems where both
|
||||
* functions have the same PCI-ID!
|
||||
*/
|
||||
if (PCI_FUNC(pdev->devfn))
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* apple-gmux is needed on dual GPU MacBook Pro
|
||||
* to probe the panel if we're the inactive GPU.
|
||||
*/
|
||||
if (vga_switcheroo_client_probe_defer(pdev))
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
return i915_driver_load(pdev, ent);
|
||||
}
|
||||
|
||||
extern void i915_driver_unload(struct drm_device *dev);
|
||||
|
||||
static void i915_pci_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
|
||||
i915_driver_unload(dev);
|
||||
drm_dev_unref(dev);
|
||||
}
|
||||
|
||||
extern const struct dev_pm_ops i915_pm_ops;
|
||||
|
||||
static struct pci_driver i915_pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
.probe = i915_pci_probe,
|
||||
.remove = i915_pci_remove,
|
||||
.driver.pm = &i915_pm_ops,
|
||||
};
|
||||
|
||||
static int __init i915_init(void)
|
||||
{
|
||||
bool use_kms = true;
|
||||
|
||||
/*
|
||||
* Enable KMS by default, unless explicitly overriden by
|
||||
* either the i915.modeset prarameter or by the
|
||||
* vga_text_mode_force boot option.
|
||||
*/
|
||||
|
||||
if (i915.modeset == 0)
|
||||
use_kms = false;
|
||||
|
||||
if (vgacon_text_force() && i915.modeset == -1)
|
||||
use_kms = false;
|
||||
|
||||
if (!use_kms) {
|
||||
/* Silently fail loading to not upset userspace. */
|
||||
DRM_DEBUG_DRIVER("KMS disabled.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
return pci_register_driver(&i915_pci_driver);
|
||||
}
|
||||
|
||||
static void __exit i915_exit(void)
|
||||
{
|
||||
if (!i915_pci_driver.driver.owner)
|
||||
return;
|
||||
|
||||
pci_unregister_driver(&i915_pci_driver);
|
||||
}
|
||||
|
||||
module_init(i915_init);
|
||||
module_exit(i915_exit);
|
||||
|
||||
MODULE_AUTHOR("Tungsten Graphics, Inc.");
|
||||
MODULE_AUTHOR("Intel Corporation");
|
||||
|
||||
MODULE_DESCRIPTION(DRIVER_DESC);
|
||||
MODULE_LICENSE("GPL and additional rights");
|
|
@ -7070,7 +7070,8 @@ enum {
|
|||
#define GEN6_RPDEUC _MMIO(0xA084)
|
||||
#define GEN6_RPDEUCSW _MMIO(0xA088)
|
||||
#define GEN6_RC_STATE _MMIO(0xA094)
|
||||
#define RC6_STATE (1 << 18)
|
||||
#define RC_SW_TARGET_STATE_SHIFT 16
|
||||
#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT)
|
||||
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
|
||||
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
|
||||
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
|
||||
|
@ -7085,12 +7086,16 @@ enum {
|
|||
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
|
||||
#define GEN6_PMINTRMSK _MMIO(0xA168)
|
||||
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
|
||||
#define GEN8_MISC_CTRL0 _MMIO(0xA180)
|
||||
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
|
||||
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
|
||||
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
|
||||
#define GEN9_PG_ENABLE _MMIO(0xA210)
|
||||
#define GEN9_RENDER_PG_ENABLE (1<<0)
|
||||
#define GEN9_MEDIA_PG_ENABLE (1<<1)
|
||||
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
|
||||
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
|
||||
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
|
||||
|
||||
#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
|
||||
#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
|
||||
static void i915_save_display(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
/* Display arbitration control */
|
||||
if (INTEL_INFO(dev)->gen <= 4)
|
||||
|
@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev)
|
|||
|
||||
static void i915_restore_display(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 mask = 0xffffffff;
|
||||
|
||||
/* Display arbitration */
|
||||
|
@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev)
|
|||
|
||||
int i915_save_state(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int i;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev)
|
|||
|
||||
int i915_restore_state(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int i;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
static u32 calc_residency(struct drm_device *dev,
|
||||
i915_reg_t reg)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u64 raw_time; /* 32b value may overflow during fixed point math */
|
||||
u64 units = 128ULL, div = 100000ULL;
|
||||
u32 ret;
|
||||
|
@ -166,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
|
|||
struct device *dev = kobj_to_dev(kobj);
|
||||
struct drm_minor *dminor = dev_to_drm_minor(dev);
|
||||
struct drm_device *drm_dev = dminor->dev;
|
||||
struct drm_i915_private *dev_priv = drm_dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(drm_dev);
|
||||
int slice = (int)(uintptr_t)attr->private;
|
||||
int ret;
|
||||
|
||||
|
@ -202,7 +202,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
|
|||
struct device *dev = kobj_to_dev(kobj);
|
||||
struct drm_minor *dminor = dev_to_drm_minor(dev);
|
||||
struct drm_device *drm_dev = dminor->dev;
|
||||
struct drm_i915_private *dev_priv = drm_dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(drm_dev);
|
||||
struct i915_gem_context *ctx;
|
||||
u32 *temp = NULL; /* Just here to make handling failures easy */
|
||||
int slice = (int)(uintptr_t)attr->private;
|
||||
|
@ -227,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
|
|||
}
|
||||
}
|
||||
|
||||
ret = i915_gpu_idle(drm_dev);
|
||||
if (ret) {
|
||||
kfree(temp);
|
||||
mutex_unlock(&drm_dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* TODO: Ideally we really want a GPU reset here to make sure errors
|
||||
* aren't propagated. Since I cannot find a stable way to reset the GPU
|
||||
* at this point it is left as a TODO.
|
||||
|
@ -275,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
|
|||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
@ -309,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
|
|||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
@ -330,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
|
|||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE,
|
||||
"%d\n",
|
||||
|
@ -341,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
|
|||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
@ -359,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
|
|||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 val;
|
||||
ssize_t ret;
|
||||
|
||||
|
@ -409,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
|
|||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
@ -427,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
|
|||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 val;
|
||||
ssize_t ret;
|
||||
|
||||
|
@ -487,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
|
|||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 val;
|
||||
|
||||
if (attr == &dev_attr_gt_RP0_freq_mhz)
|
||||
|
|
|
@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = i915->dev->primary->index;
|
||||
__entry->dev = i915->drm.primary->index;
|
||||
__entry->target = target;
|
||||
__entry->flags = flags;
|
||||
),
|
||||
|
@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = from->i915->dev->primary->index;
|
||||
__entry->dev = from->i915->drm.primary->index;
|
||||
__entry->sync_from = from->id;
|
||||
__entry->sync_to = to_req->engine->id;
|
||||
__entry->seqno = i915_gem_request_get_seqno(req);
|
||||
|
@ -486,11 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->i915->dev->primary->index;
|
||||
__entry->dev = req->i915->drm.primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->seqno = req->seqno;
|
||||
__entry->flags = flags;
|
||||
i915_trace_irq_get(req->engine, req);
|
||||
intel_engine_enable_signaling(req);
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
|
||||
|
@ -509,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->i915->dev->primary->index;
|
||||
__entry->dev = req->i915->drm.primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->invalidate = invalidate;
|
||||
__entry->flush = flush;
|
||||
|
@ -531,7 +531,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->i915->dev->primary->index;
|
||||
__entry->dev = req->i915->drm.primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->seqno = req->seqno;
|
||||
),
|
||||
|
@ -556,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = engine->i915->dev->primary->index;
|
||||
__entry->dev = engine->i915->drm.primary->index;
|
||||
__entry->ring = engine->id;
|
||||
__entry->seqno = engine->get_seqno(engine);
|
||||
__entry->seqno = intel_engine_get_seqno(engine);
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, seqno=%u",
|
||||
|
@ -593,11 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
|
|||
* less desirable.
|
||||
*/
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->i915->dev->primary->index;
|
||||
__entry->dev = req->i915->drm.primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->seqno = req->seqno;
|
||||
__entry->blocking =
|
||||
mutex_is_locked(&req->i915->dev->struct_mutex);
|
||||
mutex_is_locked(&req->i915->drm.struct_mutex);
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
|
||||
|
@ -746,7 +746,7 @@ DECLARE_EVENT_CLASS(i915_context,
|
|||
TP_fast_assign(
|
||||
__entry->ctx = ctx;
|
||||
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
|
||||
__entry->dev = ctx->i915->dev->primary->index;
|
||||
__entry->dev = ctx->i915->drm.primary->index;
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
|
||||
|
@ -786,7 +786,7 @@ TRACE_EVENT(switch_mm,
|
|||
__entry->ring = engine->id;
|
||||
__entry->to = to;
|
||||
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
|
||||
__entry->dev = engine->i915->dev->primary->index;
|
||||
__entry->dev = engine->i915->drm.primary->index;
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
|
||||
|
|
|
@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
|
|||
{
|
||||
if (((mode->clock == TMDS_297M) ||
|
||||
(mode->clock == TMDS_296M)) &&
|
||||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
|
||||
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
|
@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
|
|||
i915_reg_t reg_elda, uint32_t bits_elda,
|
||||
i915_reg_t reg_edid)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
uint8_t *eld = connector->eld;
|
||||
uint32_t tmp;
|
||||
int i;
|
||||
|
@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
|
|||
|
||||
static void g4x_audio_codec_disable(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
uint32_t eldv, tmp;
|
||||
|
||||
DRM_DEBUG_KMS("Disable audio codec\n");
|
||||
|
@ -210,7 +210,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
|
|||
struct intel_encoder *encoder,
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
uint8_t *eld = connector->eld;
|
||||
uint32_t eldv;
|
||||
uint32_t tmp;
|
||||
|
@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
|
|||
|
||||
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
uint32_t tmp;
|
||||
|
@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
|
|||
tmp |= AUD_CONFIG_N_PROG_ENABLE;
|
||||
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
|
||||
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
|
||||
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
|
||||
if (intel_crtc_has_dp_encoder(intel_crtc->config))
|
||||
tmp |= AUD_CONFIG_N_VALUE_INDEX;
|
||||
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
|
||||
|
||||
|
@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
|
|||
struct intel_encoder *encoder,
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
struct i915_audio_component *acomp = dev_priv->audio_component;
|
||||
|
@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
|
|||
tmp = I915_READ(HSW_AUD_CFG(pipe));
|
||||
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
|
||||
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
|
||||
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
|
||||
if (intel_crtc_has_dp_encoder(intel_crtc->config))
|
||||
tmp |= AUD_CONFIG_N_VALUE_INDEX;
|
||||
else
|
||||
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
|
||||
|
@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
|
|||
|
||||
static void ilk_audio_codec_disable(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_digital_port *intel_dig_port =
|
||||
enc_to_dig_port(&encoder->base);
|
||||
|
@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
|
|||
tmp |= AUD_CONFIG_N_PROG_ENABLE;
|
||||
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
|
||||
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
|
||||
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
|
||||
if (intel_crtc_has_dp_encoder(intel_crtc->config))
|
||||
tmp |= AUD_CONFIG_N_VALUE_INDEX;
|
||||
I915_WRITE(aud_config, tmp);
|
||||
|
||||
|
@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
|
|||
struct intel_encoder *encoder,
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_digital_port *intel_dig_port =
|
||||
enc_to_dig_port(&encoder->base);
|
||||
|
@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
|
|||
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
|
||||
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
|
||||
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
|
||||
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
|
||||
if (intel_crtc_has_dp_encoder(intel_crtc->config))
|
||||
tmp |= AUD_CONFIG_N_VALUE_INDEX;
|
||||
else
|
||||
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
|
||||
|
@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
|
|||
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
struct drm_connector *connector;
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct i915_audio_component *acomp = dev_priv->audio_component;
|
||||
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
|
||||
enum port port = intel_dig_port->port;
|
||||
|
@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
|
|||
|
||||
/* ELD Conn_Type */
|
||||
connector->eld[5] &= ~(3 << 2);
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
|
||||
if (intel_crtc_has_dp_encoder(crtc->config))
|
||||
connector->eld[5] |= (1 << 2);
|
||||
|
||||
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
|
||||
|
@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
|
|||
{
|
||||
struct drm_encoder *encoder = &intel_encoder->base;
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct i915_audio_component *acomp = dev_priv->audio_component;
|
||||
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
|
||||
enum port port = intel_dig_port->port;
|
||||
|
@ -749,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
|
|||
if (WARN_ON(acomp->ops || acomp->dev))
|
||||
return -EEXIST;
|
||||
|
||||
drm_modeset_lock_all(dev_priv->dev);
|
||||
drm_modeset_lock_all(&dev_priv->drm);
|
||||
acomp->ops = &i915_audio_component_ops;
|
||||
acomp->dev = i915_dev;
|
||||
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
|
||||
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
|
||||
acomp->aud_sample_rate[i] = 0;
|
||||
dev_priv->audio_component = acomp;
|
||||
drm_modeset_unlock_all(dev_priv->dev);
|
||||
drm_modeset_unlock_all(&dev_priv->drm);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -767,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
|
|||
struct i915_audio_component *acomp = data;
|
||||
struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
|
||||
|
||||
drm_modeset_lock_all(dev_priv->dev);
|
||||
drm_modeset_lock_all(&dev_priv->drm);
|
||||
acomp->ops = NULL;
|
||||
acomp->dev = NULL;
|
||||
dev_priv->audio_component = NULL;
|
||||
drm_modeset_unlock_all(dev_priv->dev);
|
||||
drm_modeset_unlock_all(&dev_priv->drm);
|
||||
}
|
||||
|
||||
static const struct component_ops i915_audio_component_bind_ops = {
|
||||
|
@ -799,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
int ret;
|
||||
|
||||
ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
|
||||
ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("failed to add audio component (%d)\n", ret);
|
||||
/* continue with reduced functionality */
|
||||
|
@ -821,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
|
|||
if (!dev_priv->audio_component_registered)
|
||||
return;
|
||||
|
||||
component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
|
||||
component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
|
||||
dev_priv->audio_component_registered = false;
|
||||
}
|
||||
|
|
|
@ -1426,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
|
|||
int
|
||||
intel_bios_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
const struct vbt_header *vbt = dev_priv->opregion.vbt;
|
||||
const struct bdb_header *bdb;
|
||||
u8 __iomem *bios = NULL;
|
||||
|
|
|
@ -0,0 +1,586 @@
|
|||
/*
|
||||
* Copyright © 2015 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
static void intel_breadcrumbs_fake_irq(unsigned long data)
|
||||
{
|
||||
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
|
||||
|
||||
/*
|
||||
* The timer persists in case we cannot enable interrupts,
|
||||
* or if we have previously seen seqno/interrupt incoherency
|
||||
* ("missed interrupt" syndrome). Here the worker will wake up
|
||||
* every jiffie in order to kick the oldest waiter to do the
|
||||
* coherent seqno check.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
if (intel_engine_wakeup(engine))
|
||||
mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void irq_enable(struct intel_engine_cs *engine)
|
||||
{
|
||||
/* Enabling the IRQ may miss the generation of the interrupt, but
|
||||
* we still need to force the barrier before reading the seqno,
|
||||
* just in case.
|
||||
*/
|
||||
engine->breadcrumbs.irq_posted = true;
|
||||
|
||||
spin_lock_irq(&engine->i915->irq_lock);
|
||||
engine->irq_enable(engine);
|
||||
spin_unlock_irq(&engine->i915->irq_lock);
|
||||
}
|
||||
|
||||
static void irq_disable(struct intel_engine_cs *engine)
|
||||
{
|
||||
spin_lock_irq(&engine->i915->irq_lock);
|
||||
engine->irq_disable(engine);
|
||||
spin_unlock_irq(&engine->i915->irq_lock);
|
||||
|
||||
engine->breadcrumbs.irq_posted = false;
|
||||
}
|
||||
|
||||
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
|
||||
{
|
||||
struct intel_engine_cs *engine =
|
||||
container_of(b, struct intel_engine_cs, breadcrumbs);
|
||||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
||||
assert_spin_locked(&b->lock);
|
||||
if (b->rpm_wakelock)
|
||||
return;
|
||||
|
||||
/* Since we are waiting on a request, the GPU should be busy
|
||||
* and should have its own rpm reference. For completeness,
|
||||
* record an rpm reference for ourselves to cover the
|
||||
* interrupt we unmask.
|
||||
*/
|
||||
intel_runtime_pm_get_noresume(i915);
|
||||
b->rpm_wakelock = true;
|
||||
|
||||
/* No interrupts? Kick the waiter every jiffie! */
|
||||
if (intel_irqs_enabled(i915)) {
|
||||
if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
|
||||
irq_enable(engine);
|
||||
b->irq_enabled = true;
|
||||
}
|
||||
|
||||
if (!b->irq_enabled ||
|
||||
test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
|
||||
mod_timer(&b->fake_irq, jiffies + 1);
|
||||
}
|
||||
|
||||
static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
|
||||
{
|
||||
struct intel_engine_cs *engine =
|
||||
container_of(b, struct intel_engine_cs, breadcrumbs);
|
||||
|
||||
assert_spin_locked(&b->lock);
|
||||
if (!b->rpm_wakelock)
|
||||
return;
|
||||
|
||||
if (b->irq_enabled) {
|
||||
irq_disable(engine);
|
||||
b->irq_enabled = false;
|
||||
}
|
||||
|
||||
intel_runtime_pm_put(engine->i915);
|
||||
b->rpm_wakelock = false;
|
||||
}
|
||||
|
||||
static inline struct intel_wait *to_wait(struct rb_node *node)
|
||||
{
|
||||
return container_of(node, struct intel_wait, node);
|
||||
}
|
||||
|
||||
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
|
||||
struct intel_wait *wait)
|
||||
{
|
||||
assert_spin_locked(&b->lock);
|
||||
|
||||
/* This request is completed, so remove it from the tree, mark it as
|
||||
* complete, and *then* wake up the associated task.
|
||||
*/
|
||||
rb_erase(&wait->node, &b->waiters);
|
||||
RB_CLEAR_NODE(&wait->node);
|
||||
|
||||
wake_up_process(wait->tsk); /* implicit smp_wmb() */
|
||||
}
|
||||
|
||||
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
|
||||
struct intel_wait *wait)
|
||||
{
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
struct rb_node **p, *parent, *completed;
|
||||
bool first;
|
||||
u32 seqno;
|
||||
|
||||
/* Insert the request into the retirement ordered list
|
||||
* of waiters by walking the rbtree. If we are the oldest
|
||||
* seqno in the tree (the first to be retired), then
|
||||
* set ourselves as the bottom-half.
|
||||
*
|
||||
* As we descend the tree, prune completed branches since we hold the
|
||||
* spinlock we know that the first_waiter must be delayed and can
|
||||
* reduce some of the sequential wake up latency if we take action
|
||||
* ourselves and wake up the completed tasks in parallel. Also, by
|
||||
* removing stale elements in the tree, we may be able to reduce the
|
||||
* ping-pong between the old bottom-half and ourselves as first-waiter.
|
||||
*/
|
||||
first = true;
|
||||
parent = NULL;
|
||||
completed = NULL;
|
||||
seqno = intel_engine_get_seqno(engine);
|
||||
|
||||
/* If the request completed before we managed to grab the spinlock,
|
||||
* return now before adding ourselves to the rbtree. We let the
|
||||
* current bottom-half handle any pending wakeups and instead
|
||||
* try and get out of the way quickly.
|
||||
*/
|
||||
if (i915_seqno_passed(seqno, wait->seqno)) {
|
||||
RB_CLEAR_NODE(&wait->node);
|
||||
return first;
|
||||
}
|
||||
|
||||
p = &b->waiters.rb_node;
|
||||
while (*p) {
|
||||
parent = *p;
|
||||
if (wait->seqno == to_wait(parent)->seqno) {
|
||||
/* We have multiple waiters on the same seqno, select
|
||||
* the highest priority task (that with the smallest
|
||||
* task->prio) to serve as the bottom-half for this
|
||||
* group.
|
||||
*/
|
||||
if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
|
||||
p = &parent->rb_right;
|
||||
first = false;
|
||||
} else {
|
||||
p = &parent->rb_left;
|
||||
}
|
||||
} else if (i915_seqno_passed(wait->seqno,
|
||||
to_wait(parent)->seqno)) {
|
||||
p = &parent->rb_right;
|
||||
if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
|
||||
completed = parent;
|
||||
else
|
||||
first = false;
|
||||
} else {
|
||||
p = &parent->rb_left;
|
||||
}
|
||||
}
|
||||
rb_link_node(&wait->node, parent, p);
|
||||
rb_insert_color(&wait->node, &b->waiters);
|
||||
GEM_BUG_ON(!first && !b->irq_seqno_bh);
|
||||
|
||||
if (completed) {
|
||||
struct rb_node *next = rb_next(completed);
|
||||
|
||||
GEM_BUG_ON(!next && !first);
|
||||
if (next && next != &wait->node) {
|
||||
GEM_BUG_ON(first);
|
||||
b->first_wait = to_wait(next);
|
||||
smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
|
||||
/* As there is a delay between reading the current
|
||||
* seqno, processing the completed tasks and selecting
|
||||
* the next waiter, we may have missed the interrupt
|
||||
* and so need for the next bottom-half to wakeup.
|
||||
*
|
||||
* Also as we enable the IRQ, we may miss the
|
||||
* interrupt for that seqno, so we have to wake up
|
||||
* the next bottom-half in order to do a coherent check
|
||||
* in case the seqno passed.
|
||||
*/
|
||||
__intel_breadcrumbs_enable_irq(b);
|
||||
if (READ_ONCE(b->irq_posted))
|
||||
wake_up_process(to_wait(next)->tsk);
|
||||
}
|
||||
|
||||
do {
|
||||
struct intel_wait *crumb = to_wait(completed);
|
||||
completed = rb_prev(completed);
|
||||
__intel_breadcrumbs_finish(b, crumb);
|
||||
} while (completed);
|
||||
}
|
||||
|
||||
if (first) {
|
||||
GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
|
||||
b->first_wait = wait;
|
||||
smp_store_mb(b->irq_seqno_bh, wait->tsk);
|
||||
/* After assigning ourselves as the new bottom-half, we must
|
||||
* perform a cursory check to prevent a missed interrupt.
|
||||
* Either we miss the interrupt whilst programming the hardware,
|
||||
* or if there was a previous waiter (for a later seqno) they
|
||||
* may be woken instead of us (due to the inherent race
|
||||
* in the unlocked read of b->irq_seqno_bh in the irq handler)
|
||||
* and so we miss the wake up.
|
||||
*/
|
||||
__intel_breadcrumbs_enable_irq(b);
|
||||
}
|
||||
GEM_BUG_ON(!b->irq_seqno_bh);
|
||||
GEM_BUG_ON(!b->first_wait);
|
||||
GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
|
||||
|
||||
return first;
|
||||
}
|
||||
|
||||
bool intel_engine_add_wait(struct intel_engine_cs *engine,
|
||||
struct intel_wait *wait)
|
||||
{
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
bool first;
|
||||
|
||||
spin_lock(&b->lock);
|
||||
first = __intel_engine_add_wait(engine, wait);
|
||||
spin_unlock(&b->lock);
|
||||
|
||||
return first;
|
||||
}
|
||||
|
||||
void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
|
||||
{
|
||||
mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
|
||||
}
|
||||
|
||||
static inline bool chain_wakeup(struct rb_node *rb, int priority)
|
||||
{
|
||||
return rb && to_wait(rb)->tsk->prio <= priority;
|
||||
}
|
||||
|
||||
static inline int wakeup_priority(struct intel_breadcrumbs *b,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
if (tsk == b->signaler)
|
||||
return INT_MIN;
|
||||
else
|
||||
return tsk->prio;
|
||||
}
|
||||
|
||||
void intel_engine_remove_wait(struct intel_engine_cs *engine,
|
||||
struct intel_wait *wait)
|
||||
{
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
|
||||
/* Quick check to see if this waiter was already decoupled from
|
||||
* the tree by the bottom-half to avoid contention on the spinlock
|
||||
* by the herd.
|
||||
*/
|
||||
if (RB_EMPTY_NODE(&wait->node))
|
||||
return;
|
||||
|
||||
spin_lock(&b->lock);
|
||||
|
||||
if (RB_EMPTY_NODE(&wait->node))
|
||||
goto out_unlock;
|
||||
|
||||
if (b->first_wait == wait) {
|
||||
const int priority = wakeup_priority(b, wait->tsk);
|
||||
struct rb_node *next;
|
||||
|
||||
GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
|
||||
|
||||
/* We are the current bottom-half. Find the next candidate,
|
||||
* the first waiter in the queue on the remaining oldest
|
||||
* request. As multiple seqnos may complete in the time it
|
||||
* takes us to wake up and find the next waiter, we have to
|
||||
* wake up that waiter for it to perform its own coherent
|
||||
* completion check.
|
||||
*/
|
||||
next = rb_next(&wait->node);
|
||||
if (chain_wakeup(next, priority)) {
|
||||
/* If the next waiter is already complete,
|
||||
* wake it up and continue onto the next waiter. So
|
||||
* if have a small herd, they will wake up in parallel
|
||||
* rather than sequentially, which should reduce
|
||||
* the overall latency in waking all the completed
|
||||
* clients.
|
||||
*
|
||||
* However, waking up a chain adds extra latency to
|
||||
* the first_waiter. This is undesirable if that
|
||||
* waiter is a high priority task.
|
||||
*/
|
||||
u32 seqno = intel_engine_get_seqno(engine);
|
||||
|
||||
while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
|
||||
struct rb_node *n = rb_next(next);
|
||||
|
||||
__intel_breadcrumbs_finish(b, to_wait(next));
|
||||
next = n;
|
||||
if (!chain_wakeup(next, priority))
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (next) {
|
||||
/* In our haste, we may have completed the first waiter
|
||||
* before we enabled the interrupt. Do so now as we
|
||||
* have a second waiter for a future seqno. Afterwards,
|
||||
* we have to wake up that waiter in case we missed
|
||||
* the interrupt, or if we have to handle an
|
||||
* exception rather than a seqno completion.
|
||||
*/
|
||||
b->first_wait = to_wait(next);
|
||||
smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
|
||||
if (b->first_wait->seqno != wait->seqno)
|
||||
__intel_breadcrumbs_enable_irq(b);
|
||||
wake_up_process(b->irq_seqno_bh);
|
||||
} else {
|
||||
b->first_wait = NULL;
|
||||
WRITE_ONCE(b->irq_seqno_bh, NULL);
|
||||
__intel_breadcrumbs_disable_irq(b);
|
||||
}
|
||||
} else {
|
||||
GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
|
||||
}
|
||||
|
||||
GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
|
||||
rb_erase(&wait->node, &b->waiters);
|
||||
|
||||
out_unlock:
|
||||
GEM_BUG_ON(b->first_wait == wait);
|
||||
GEM_BUG_ON(rb_first(&b->waiters) !=
|
||||
(b->first_wait ? &b->first_wait->node : NULL));
|
||||
GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
|
||||
spin_unlock(&b->lock);
|
||||
}
|
||||
|
||||
static bool signal_complete(struct drm_i915_gem_request *request)
|
||||
{
|
||||
if (!request)
|
||||
return false;
|
||||
|
||||
/* If another process served as the bottom-half it may have already
|
||||
* signalled that this wait is already completed.
|
||||
*/
|
||||
if (intel_wait_complete(&request->signaling.wait))
|
||||
return true;
|
||||
|
||||
/* Carefully check if the request is complete, giving time for the
|
||||
* seqno to be visible or if the GPU hung.
|
||||
*/
|
||||
if (__i915_request_irq_complete(request))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
|
||||
{
|
||||
return container_of(rb, struct drm_i915_gem_request, signaling.node);
|
||||
}
|
||||
|
||||
static void signaler_set_rtpriority(void)
|
||||
{
|
||||
struct sched_param param = { .sched_priority = 1 };
|
||||
|
||||
sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m);
|
||||
}
|
||||
|
||||
static int intel_breadcrumbs_signaler(void *arg)
|
||||
{
|
||||
struct intel_engine_cs *engine = arg;
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
/* Install ourselves with high priority to reduce signalling latency */
|
||||
signaler_set_rtpriority();
|
||||
|
||||
do {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
/* We are either woken up by the interrupt bottom-half,
|
||||
* or by a client adding a new signaller. In both cases,
|
||||
* the GPU seqno may have advanced beyond our oldest signal.
|
||||
* If it has, propagate the signal, remove the waiter and
|
||||
* check again with the next oldest signal. Otherwise we
|
||||
* need to wait for a new interrupt from the GPU or for
|
||||
* a new client.
|
||||
*/
|
||||
request = READ_ONCE(b->first_signal);
|
||||
if (signal_complete(request)) {
|
||||
/* Wake up all other completed waiters and select the
|
||||
* next bottom-half for the next user interrupt.
|
||||
*/
|
||||
intel_engine_remove_wait(engine,
|
||||
&request->signaling.wait);
|
||||
|
||||
/* Find the next oldest signal. Note that as we have
|
||||
* not been holding the lock, another client may
|
||||
* have installed an even older signal than the one
|
||||
* we just completed - so double check we are still
|
||||
* the oldest before picking the next one.
|
||||
*/
|
||||
spin_lock(&b->lock);
|
||||
if (request == b->first_signal) {
|
||||
struct rb_node *rb =
|
||||
rb_next(&request->signaling.node);
|
||||
b->first_signal = rb ? to_signaler(rb) : NULL;
|
||||
}
|
||||
rb_erase(&request->signaling.node, &b->signals);
|
||||
spin_unlock(&b->lock);
|
||||
|
||||
i915_gem_request_unreference(request);
|
||||
} else {
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
|
||||
schedule();
|
||||
}
|
||||
} while (1);
|
||||
__set_current_state(TASK_RUNNING);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
|
||||
{
|
||||
struct intel_engine_cs *engine = request->engine;
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
struct rb_node *parent, **p;
|
||||
bool first, wakeup;
|
||||
|
||||
if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
|
||||
return;
|
||||
|
||||
spin_lock(&b->lock);
|
||||
if (unlikely(request->signaling.wait.tsk)) {
|
||||
wakeup = false;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
request->signaling.wait.tsk = b->signaler;
|
||||
request->signaling.wait.seqno = request->seqno;
|
||||
i915_gem_request_reference(request);
|
||||
|
||||
/* First add ourselves into the list of waiters, but register our
|
||||
* bottom-half as the signaller thread. As per usual, only the oldest
|
||||
* waiter (not just signaller) is tasked as the bottom-half waking
|
||||
* up all completed waiters after the user interrupt.
|
||||
*
|
||||
* If we are the oldest waiter, enable the irq (after which we
|
||||
* must double check that the seqno did not complete).
|
||||
*/
|
||||
wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);
|
||||
|
||||
/* Now insert ourselves into the retirement ordered list of signals
|
||||
* on this engine. We track the oldest seqno as that will be the
|
||||
* first signal to complete.
|
||||
*/
|
||||
parent = NULL;
|
||||
first = true;
|
||||
p = &b->signals.rb_node;
|
||||
while (*p) {
|
||||
parent = *p;
|
||||
if (i915_seqno_passed(request->seqno,
|
||||
to_signaler(parent)->seqno)) {
|
||||
p = &parent->rb_right;
|
||||
first = false;
|
||||
} else {
|
||||
p = &parent->rb_left;
|
||||
}
|
||||
}
|
||||
rb_link_node(&request->signaling.node, parent, p);
|
||||
rb_insert_color(&request->signaling.node, &b->signals);
|
||||
if (first)
|
||||
smp_store_mb(b->first_signal, request);
|
||||
|
||||
unlock:
|
||||
spin_unlock(&b->lock);
|
||||
|
||||
if (wakeup)
|
||||
wake_up_process(b->signaler);
|
||||
}
|
||||
|
||||
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
struct task_struct *tsk;
|
||||
|
||||
spin_lock_init(&b->lock);
|
||||
setup_timer(&b->fake_irq,
|
||||
intel_breadcrumbs_fake_irq,
|
||||
(unsigned long)engine);
|
||||
|
||||
/* Spawn a thread to provide a common bottom-half for all signals.
|
||||
* As this is an asynchronous interface we cannot steal the current
|
||||
* task for handling the bottom-half to the user interrupt, therefore
|
||||
* we create a thread to do the coherent seqno dance after the
|
||||
* interrupt and then signal the waitqueue (via the dma-buf/fence).
|
||||
*/
|
||||
tsk = kthread_run(intel_breadcrumbs_signaler, engine,
|
||||
"i915/signal:%d", engine->id);
|
||||
if (IS_ERR(tsk))
|
||||
return PTR_ERR(tsk);
|
||||
|
||||
b->signaler = tsk;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
|
||||
if (!IS_ERR_OR_NULL(b->signaler))
|
||||
kthread_stop(b->signaler);
|
||||
|
||||
del_timer_sync(&b->fake_irq);
|
||||
}
|
||||
|
||||
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
unsigned int mask = 0;
|
||||
|
||||
/* To avoid the task_struct disappearing beneath us as we wake up
|
||||
* the process, we must first inspect the task_struct->state under the
|
||||
* RCU lock, i.e. as we call wake_up_process() we must be holding the
|
||||
* rcu_read_lock().
|
||||
*/
|
||||
rcu_read_lock();
|
||||
for_each_engine(engine, i915)
|
||||
if (unlikely(intel_engine_wakeup(engine)))
|
||||
mask |= intel_engine_flag(engine);
|
||||
rcu_read_unlock();
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
unsigned int mask = 0;
|
||||
|
||||
for_each_engine(engine, i915) {
|
||||
if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
|
||||
wake_up_process(engine->breadcrumbs.signaler);
|
||||
mask |= intel_engine_flag(engine);
|
||||
}
|
||||
}
|
||||
|
||||
return mask;
|
||||
}
|
|
@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
|
|||
{
|
||||
struct drm_crtc *crtc = crtc_state->crtc;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int i, pipe = intel_crtc->pipe;
|
||||
uint16_t coeffs[9] = { 0, };
|
||||
|
@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
|
|||
{
|
||||
struct drm_crtc *crtc = state->crtc;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int pipe = to_intel_crtc(crtc)->pipe;
|
||||
uint32_t mode;
|
||||
|
||||
|
@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
|
|||
void intel_color_set_csc(struct drm_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_device *dev = crtc_state->crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
if (dev_priv->display.load_csc_matrix)
|
||||
dev_priv->display.load_csc_matrix(crtc_state);
|
||||
|
@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
|
|||
struct drm_property_blob *blob)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
int i;
|
||||
|
||||
if (HAS_GMCH_DISPLAY(dev)) {
|
||||
if (intel_crtc->config->has_dsi_encoder)
|
||||
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
|
||||
assert_dsi_pll_enabled(dev_priv);
|
||||
else
|
||||
assert_pll_enabled(dev_priv, pipe);
|
||||
|
@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
|
|||
{
|
||||
struct drm_crtc *crtc = crtc_state->crtc;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_crtc_state *intel_crtc_state =
|
||||
to_intel_crtc_state(crtc_state);
|
||||
|
@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
|
|||
{
|
||||
struct drm_crtc *crtc = state->crtc;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
|
||||
enum pipe pipe = to_intel_crtc(crtc)->pipe;
|
||||
uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
|
||||
|
@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
|
|||
{
|
||||
struct drm_crtc *crtc = state->crtc;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum pipe pipe = to_intel_crtc(crtc)->pipe;
|
||||
struct drm_color_lut *lut;
|
||||
uint32_t i, lut_size;
|
||||
|
@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
|
|||
void intel_color_load_luts(struct drm_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_device *dev = crtc_state->crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
dev_priv->display.load_luts(crtc_state);
|
||||
}
|
||||
|
@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
|
|||
void intel_color_init(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
drm_mode_crtc_set_gamma_size(crtc, 256);
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
|
|||
enum pipe *pipe)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crt *crt = intel_encoder_to_crt(encoder);
|
||||
enum intel_display_power_domain power_domain;
|
||||
u32 tmp;
|
||||
|
@ -98,7 +98,7 @@ out:
|
|||
|
||||
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crt *crt = intel_encoder_to_crt(encoder);
|
||||
u32 tmp, flags = 0;
|
||||
|
||||
|
@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
|
|||
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crt *crt = intel_encoder_to_crt(encoder);
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
|
@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
|
|||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct intel_crt *crt = intel_attached_crt(connector);
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 adpa;
|
||||
bool ret;
|
||||
|
||||
|
@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
|
|||
|
||||
I915_WRITE(crt->adpa_reg, adpa);
|
||||
|
||||
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
|
||||
1000))
|
||||
if (intel_wait_for_register(dev_priv,
|
||||
crt->adpa_reg,
|
||||
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
|
||||
1000))
|
||||
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
|
||||
|
||||
if (turn_off_dac) {
|
||||
|
@ -326,7 +328,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
|
|||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct intel_crt *crt = intel_attached_crt(connector);
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 adpa;
|
||||
bool ret;
|
||||
u32 save_adpa;
|
||||
|
@ -338,8 +340,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
|
|||
|
||||
I915_WRITE(crt->adpa_reg, adpa);
|
||||
|
||||
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
|
||||
1000)) {
|
||||
if (intel_wait_for_register(dev_priv,
|
||||
crt->adpa_reg,
|
||||
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
|
||||
1000)) {
|
||||
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
|
||||
I915_WRITE(crt->adpa_reg, save_adpa);
|
||||
}
|
||||
|
@ -367,7 +371,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
|
|||
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 stat;
|
||||
bool ret = false;
|
||||
int i, tries = 0;
|
||||
|
@ -394,9 +398,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
|
|||
CRT_HOTPLUG_FORCE_DETECT,
|
||||
CRT_HOTPLUG_FORCE_DETECT);
|
||||
/* wait for FORCE_DETECT to go off */
|
||||
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
|
||||
CRT_HOTPLUG_FORCE_DETECT) == 0,
|
||||
1000))
|
||||
if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
|
||||
CRT_HOTPLUG_FORCE_DETECT, 0,
|
||||
1000))
|
||||
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
|
||||
}
|
||||
|
||||
|
@ -449,7 +453,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
|
|||
static bool intel_crt_detect_ddc(struct drm_connector *connector)
|
||||
{
|
||||
struct intel_crt *crt = intel_attached_crt(connector);
|
||||
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
|
||||
struct edid *edid;
|
||||
struct i2c_adapter *i2c;
|
||||
|
||||
|
@ -485,7 +489,7 @@ static enum drm_connector_status
|
|||
intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
|
||||
{
|
||||
struct drm_device *dev = crt->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
uint32_t save_bclrpat;
|
||||
uint32_t save_vtotal;
|
||||
uint32_t vtotal, vactive;
|
||||
|
@ -600,7 +604,7 @@ static enum drm_connector_status
|
|||
intel_crt_detect(struct drm_connector *connector, bool force)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crt *crt = intel_attached_crt(connector);
|
||||
struct intel_encoder *intel_encoder = &crt->base;
|
||||
enum intel_display_power_domain power_domain;
|
||||
|
@ -681,7 +685,7 @@ static void intel_crt_destroy(struct drm_connector *connector)
|
|||
static int intel_crt_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crt *crt = intel_attached_crt(connector);
|
||||
struct intel_encoder *intel_encoder = &crt->base;
|
||||
enum intel_display_power_domain power_domain;
|
||||
|
@ -716,7 +720,7 @@ static int intel_crt_set_property(struct drm_connector *connector,
|
|||
static void intel_crt_reset(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crt *crt = intel_attached_crt(connector);
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 5) {
|
||||
|
@ -743,6 +747,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
|
|||
.dpms = drm_atomic_helper_connector_dpms,
|
||||
.detect = intel_crt_detect,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.late_register = intel_connector_register,
|
||||
.early_unregister = intel_connector_unregister,
|
||||
.destroy = intel_crt_destroy,
|
||||
.set_property = intel_crt_set_property,
|
||||
|
@ -791,7 +796,7 @@ void intel_crt_init(struct drm_device *dev)
|
|||
struct drm_connector *connector;
|
||||
struct intel_crt *crt;
|
||||
struct intel_connector *intel_connector;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
i915_reg_t adpa_reg;
|
||||
u32 adpa;
|
||||
|
||||
|
@ -879,8 +884,6 @@ void intel_crt_init(struct drm_device *dev)
|
|||
|
||||
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
|
||||
|
||||
drm_connector_register(connector);
|
||||
|
||||
if (!I915_HAS_HOTPLUG(dev))
|
||||
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
|
||||
|
||||
|
|
|
@ -41,15 +41,15 @@
|
|||
* be moved to FW_FAILED.
|
||||
*/
|
||||
|
||||
#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
|
||||
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
|
||||
MODULE_FIRMWARE(I915_CSR_KBL);
|
||||
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
|
||||
|
||||
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
|
||||
#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
|
||||
MODULE_FIRMWARE(I915_CSR_SKL);
|
||||
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
|
||||
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
|
||||
|
||||
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
|
||||
#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
|
||||
MODULE_FIRMWARE(I915_CSR_BXT);
|
||||
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
|
||||
|
||||
|
@ -286,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
|||
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
|
||||
uint32_t i;
|
||||
uint32_t *dmc_payload;
|
||||
uint32_t required_min_version;
|
||||
uint32_t required_version;
|
||||
|
||||
if (!fw)
|
||||
return NULL;
|
||||
|
@ -303,24 +303,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
|
|||
csr->version = css_header->version;
|
||||
|
||||
if (IS_KABYLAKE(dev_priv)) {
|
||||
required_min_version = KBL_CSR_VERSION_REQUIRED;
|
||||
required_version = KBL_CSR_VERSION_REQUIRED;
|
||||
} else if (IS_SKYLAKE(dev_priv)) {
|
||||
required_min_version = SKL_CSR_VERSION_REQUIRED;
|
||||
required_version = SKL_CSR_VERSION_REQUIRED;
|
||||
} else if (IS_BROXTON(dev_priv)) {
|
||||
required_min_version = BXT_CSR_VERSION_REQUIRED;
|
||||
required_version = BXT_CSR_VERSION_REQUIRED;
|
||||
} else {
|
||||
MISSING_CASE(INTEL_REVID(dev_priv));
|
||||
required_min_version = 0;
|
||||
required_version = 0;
|
||||
}
|
||||
|
||||
if (csr->version < required_min_version) {
|
||||
DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
|
||||
" please upgrade to v%u.%u or later"
|
||||
" [" FIRMWARE_URL "].\n",
|
||||
if (csr->version != required_version) {
|
||||
DRM_INFO("Refusing to load DMC firmware v%u.%u,"
|
||||
" please use v%u.%u [" FIRMWARE_URL "].\n",
|
||||
CSR_VERSION_MAJOR(csr->version),
|
||||
CSR_VERSION_MINOR(csr->version),
|
||||
CSR_VERSION_MAJOR(required_min_version),
|
||||
CSR_VERSION_MINOR(required_min_version));
|
||||
CSR_VERSION_MAJOR(required_version),
|
||||
CSR_VERSION_MINOR(required_version));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -413,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
|
|||
csr = &dev_priv->csr;
|
||||
|
||||
ret = request_firmware(&fw, dev_priv->csr.fw_path,
|
||||
&dev_priv->dev->pdev->dev);
|
||||
&dev_priv->drm.pdev->dev);
|
||||
if (fw)
|
||||
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
|
||||
|
||||
|
@ -427,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
|
|||
CSR_VERSION_MAJOR(csr->version),
|
||||
CSR_VERSION_MINOR(csr->version));
|
||||
} else {
|
||||
dev_notice(dev_priv->dev->dev,
|
||||
dev_notice(dev_priv->drm.dev,
|
||||
"Failed to load DMC firmware"
|
||||
" [" FIRMWARE_URL "],"
|
||||
" disabling runtime power management.\n");
|
||||
|
|
|
@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
|
|||
default:
|
||||
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
|
||||
/* fallthrough and treat as unknown */
|
||||
case INTEL_OUTPUT_DISPLAYPORT:
|
||||
case INTEL_OUTPUT_DP:
|
||||
case INTEL_OUTPUT_EDP:
|
||||
case INTEL_OUTPUT_HDMI:
|
||||
case INTEL_OUTPUT_UNKNOWN:
|
||||
|
@ -482,7 +482,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
|
|||
ddi_translations = ddi_translations_edp;
|
||||
size = n_edp_entries;
|
||||
break;
|
||||
case INTEL_OUTPUT_DISPLAYPORT:
|
||||
case INTEL_OUTPUT_DP:
|
||||
case INTEL_OUTPUT_HDMI:
|
||||
ddi_translations = ddi_translations_dp;
|
||||
size = n_dp_entries;
|
||||
|
@ -543,7 +543,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
|
|||
void hsw_fdi_link_train(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_encoder *encoder;
|
||||
u32 temp, i, rx_ctl_val;
|
||||
|
@ -834,7 +834,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
|
|||
if (pipe_config->has_pch_encoder)
|
||||
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
|
||||
&pipe_config->fdi_m_n);
|
||||
else if (pipe_config->has_dp_encoder)
|
||||
else if (intel_crtc_has_dp_encoder(pipe_config))
|
||||
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
|
||||
&pipe_config->dp_m_n);
|
||||
else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
|
||||
|
@ -851,7 +851,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
|
|||
static void skl_ddi_clock_get(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
int link_clock = 0;
|
||||
uint32_t dpll_ctl1, dpll;
|
||||
|
||||
|
@ -899,7 +899,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
|
|||
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
int link_clock = 0;
|
||||
u32 val, pll;
|
||||
|
||||
|
@ -971,7 +971,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
|
|||
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum port port = intel_ddi_get_encoder_port(encoder);
|
||||
uint32_t dpll = port;
|
||||
|
||||
|
@ -1061,14 +1061,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
|
|||
|
||||
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
|
||||
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
|
||||
int type = intel_encoder->type;
|
||||
uint32_t temp;
|
||||
|
||||
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
|
||||
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
|
||||
WARN_ON(transcoder_is_dsi(cpu_transcoder));
|
||||
|
||||
temp = TRANS_MSA_SYNC_CLK;
|
||||
|
@ -1096,7 +1096,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
|
|||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
|
||||
uint32_t temp;
|
||||
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
|
||||
|
@ -1113,7 +1113,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
|
|||
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
|
||||
struct drm_encoder *encoder = &intel_encoder->base;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
|
||||
enum port port = intel_ddi_get_encoder_port(intel_encoder);
|
||||
|
@ -1182,7 +1182,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
|
|||
temp |= TRANS_DDI_MODE_SELECT_FDI;
|
||||
temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
|
||||
|
||||
} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
|
||||
} else if (type == INTEL_OUTPUT_DP ||
|
||||
type == INTEL_OUTPUT_EDP) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
|
@ -1223,7 +1223,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
|
|||
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
|
||||
{
|
||||
struct drm_device *dev = intel_connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_encoder *intel_encoder = intel_connector->encoder;
|
||||
int type = intel_connector->base.connector_type;
|
||||
enum port port = intel_ddi_get_encoder_port(intel_encoder);
|
||||
|
@ -1285,7 +1285,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
|
|||
enum pipe *pipe)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum port port = intel_ddi_get_encoder_port(encoder);
|
||||
enum intel_display_power_domain power_domain;
|
||||
u32 tmp;
|
||||
|
@ -1359,7 +1359,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
|
|||
{
|
||||
struct drm_crtc *crtc = &intel_crtc->base;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
|
||||
enum port port = intel_ddi_get_encoder_port(intel_encoder);
|
||||
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
|
||||
|
@ -1371,7 +1371,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
|
|||
|
||||
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
|
||||
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
|
||||
|
||||
if (cpu_transcoder != TRANSCODER_EDP)
|
||||
|
@ -1392,7 +1392,7 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
|
|||
dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
|
||||
hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
|
||||
|
||||
if (type == INTEL_OUTPUT_DISPLAYPORT) {
|
||||
if (type == INTEL_OUTPUT_DP) {
|
||||
if (dp_iboost) {
|
||||
iboost = dp_iboost;
|
||||
} else {
|
||||
|
@ -1450,7 +1450,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
|
|||
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
|
||||
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
|
||||
ddi_translations = bxt_ddi_translations_edp;
|
||||
} else if (type == INTEL_OUTPUT_DISPLAYPORT
|
||||
} else if (type == INTEL_OUTPUT_DP
|
||||
|| type == INTEL_OUTPUT_EDP) {
|
||||
n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
|
||||
ddi_translations = bxt_ddi_translations_dp;
|
||||
|
@ -1624,7 +1624,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
|
|||
|
||||
intel_ddi_clk_select(intel_encoder, crtc->config);
|
||||
|
||||
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
|
||||
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
intel_dp_set_link_params(intel_dp, crtc->config);
|
||||
|
@ -1648,7 +1648,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
|
|||
{
|
||||
struct drm_encoder *encoder = &intel_encoder->base;
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum port port = intel_ddi_get_encoder_port(intel_encoder);
|
||||
int type = intel_encoder->type;
|
||||
uint32_t val;
|
||||
|
@ -1669,7 +1669,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
|
|||
if (wait)
|
||||
intel_wait_ddi_buf_idle(dev_priv, port);
|
||||
|
||||
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
|
||||
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
|
||||
intel_edp_panel_vdd_on(intel_dp);
|
||||
|
@ -1695,7 +1695,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
|
|||
struct drm_crtc *crtc = encoder->crtc;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum port port = intel_ddi_get_encoder_port(intel_encoder);
|
||||
int type = intel_encoder->type;
|
||||
|
||||
|
@ -1734,7 +1734,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
|
|||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int type = intel_encoder->type;
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
if (intel_crtc->config->has_audio) {
|
||||
intel_audio_codec_disable(intel_encoder);
|
||||
|
@ -1808,7 +1808,10 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
|
|||
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
|
||||
enum dpio_phy phy)
|
||||
{
|
||||
if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
|
||||
if (intel_wait_for_register(dev_priv,
|
||||
BXT_PORT_REF_DW3(phy),
|
||||
GRC_DONE, GRC_DONE,
|
||||
10))
|
||||
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
|
||||
}
|
||||
|
||||
|
@ -2121,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
|
|||
|
||||
void intel_ddi_fdi_disable(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
|
||||
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
|
||||
uint32_t val;
|
||||
|
||||
|
@ -2154,7 +2157,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
|
|||
void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
|
||||
struct intel_hdmi *intel_hdmi;
|
||||
|
@ -2208,7 +2211,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
|
|||
break;
|
||||
case TRANS_DDI_MODE_SELECT_DP_SST:
|
||||
case TRANS_DDI_MODE_SELECT_DP_MST:
|
||||
pipe_config->has_dp_encoder = true;
|
||||
pipe_config->lane_count =
|
||||
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
|
||||
intel_dp_get_m_n(intel_crtc, pipe_config);
|
||||
|
@ -2253,7 +2255,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
|
|||
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
int type = encoder->type;
|
||||
int port = intel_ddi_get_encoder_port(encoder);
|
||||
int ret;
|
||||
|
@ -2319,7 +2321,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
|
|||
|
||||
void intel_ddi_init(struct drm_device *dev, enum port port)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct drm_encoder *encoder;
|
||||
|
|
|
@ -0,0 +1,388 @@
|
|||
/*
|
||||
* Copyright © 2016 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
void intel_device_info_dump(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
const struct intel_device_info *info = &dev_priv->info;
|
||||
|
||||
#define PRINT_S(name) "%s"
|
||||
#define SEP_EMPTY
|
||||
#define PRINT_FLAG(name) info->name ? #name "," : ""
|
||||
#define SEP_COMMA ,
|
||||
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
|
||||
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
|
||||
info->gen,
|
||||
dev_priv->drm.pdev->device,
|
||||
dev_priv->drm.pdev->revision,
|
||||
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
|
||||
#undef PRINT_S
|
||||
#undef SEP_EMPTY
|
||||
#undef PRINT_FLAG
|
||||
#undef SEP_COMMA
|
||||
}
|
||||
|
||||
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_device_info *info = mkwrite_device_info(dev_priv);
|
||||
u32 fuse, eu_dis;
|
||||
|
||||
fuse = I915_READ(CHV_FUSE_GT);
|
||||
|
||||
info->slice_total = 1;
|
||||
|
||||
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
|
||||
info->subslice_per_slice++;
|
||||
eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
|
||||
CHV_FGT_EU_DIS_SS0_R1_MASK);
|
||||
info->eu_total += 8 - hweight32(eu_dis);
|
||||
}
|
||||
|
||||
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
|
||||
info->subslice_per_slice++;
|
||||
eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
|
||||
CHV_FGT_EU_DIS_SS1_R1_MASK);
|
||||
info->eu_total += 8 - hweight32(eu_dis);
|
||||
}
|
||||
|
||||
info->subslice_total = info->subslice_per_slice;
|
||||
/*
|
||||
* CHV expected to always have a uniform distribution of EU
|
||||
* across subslices.
|
||||
*/
|
||||
info->eu_per_subslice = info->subslice_total ?
|
||||
info->eu_total / info->subslice_total :
|
||||
0;
|
||||
/*
|
||||
* CHV supports subslice power gating on devices with more than
|
||||
* one subslice, and supports EU power gating on devices with
|
||||
* more than one EU pair per subslice.
|
||||
*/
|
||||
info->has_slice_pg = 0;
|
||||
info->has_subslice_pg = (info->subslice_total > 1);
|
||||
info->has_eu_pg = (info->eu_per_subslice > 2);
|
||||
}
|
||||
|
||||
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_device_info *info = mkwrite_device_info(dev_priv);
|
||||
int s_max = 3, ss_max = 4, eu_max = 8;
|
||||
int s, ss;
|
||||
u32 fuse2, s_enable, ss_disable, eu_disable;
|
||||
u8 eu_mask = 0xff;
|
||||
|
||||
fuse2 = I915_READ(GEN8_FUSE2);
|
||||
s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
|
||||
ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
|
||||
|
||||
info->slice_total = hweight32(s_enable);
|
||||
/*
|
||||
* The subslice disable field is global, i.e. it applies
|
||||
* to each of the enabled slices.
|
||||
*/
|
||||
info->subslice_per_slice = ss_max - hweight32(ss_disable);
|
||||
info->subslice_total = info->slice_total * info->subslice_per_slice;
|
||||
|
||||
/*
|
||||
* Iterate through enabled slices and subslices to
|
||||
* count the total enabled EU.
|
||||
*/
|
||||
for (s = 0; s < s_max; s++) {
|
||||
if (!(s_enable & BIT(s)))
|
||||
/* skip disabled slice */
|
||||
continue;
|
||||
|
||||
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
|
||||
for (ss = 0; ss < ss_max; ss++) {
|
||||
int eu_per_ss;
|
||||
|
||||
if (ss_disable & BIT(ss))
|
||||
/* skip disabled subslice */
|
||||
continue;
|
||||
|
||||
eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
|
||||
eu_mask);
|
||||
|
||||
/*
|
||||
* Record which subslice(s) has(have) 7 EUs. we
|
||||
* can tune the hash used to spread work among
|
||||
* subslices if they are unbalanced.
|
||||
*/
|
||||
if (eu_per_ss == 7)
|
||||
info->subslice_7eu[s] |= BIT(ss);
|
||||
|
||||
info->eu_total += eu_per_ss;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* SKL is expected to always have a uniform distribution
|
||||
* of EU across subslices with the exception that any one
|
||||
* EU in any one subslice may be fused off for die
|
||||
* recovery. BXT is expected to be perfectly uniform in EU
|
||||
* distribution.
|
||||
*/
|
||||
info->eu_per_subslice = info->subslice_total ?
|
||||
DIV_ROUND_UP(info->eu_total,
|
||||
info->subslice_total) : 0;
|
||||
/*
|
||||
* SKL supports slice power gating on devices with more than
|
||||
* one slice, and supports EU power gating on devices with
|
||||
* more than one EU pair per subslice. BXT supports subslice
|
||||
* power gating on devices with more than one subslice, and
|
||||
* supports EU power gating on devices with more than one EU
|
||||
* pair per subslice.
|
||||
*/
|
||||
info->has_slice_pg =
|
||||
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
|
||||
info->slice_total > 1;
|
||||
info->has_subslice_pg =
|
||||
IS_BROXTON(dev_priv) && info->subslice_total > 1;
|
||||
info->has_eu_pg = info->eu_per_subslice > 2;
|
||||
|
||||
if (IS_BROXTON(dev_priv)) {
|
||||
#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss))
|
||||
/*
|
||||
* There is a HW issue in 2x6 fused down parts that requires
|
||||
* Pooled EU to be enabled as a WA. The pool configuration
|
||||
* changes depending upon which subslice is fused down. This
|
||||
* doesn't affect if the device has all 3 subslices enabled.
|
||||
*/
|
||||
/* WaEnablePooledEuFor2x6:bxt */
|
||||
info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
|
||||
(info->subslice_per_slice == 2 &&
|
||||
INTEL_REVID(dev_priv) < BXT_REVID_C0));
|
||||
|
||||
info->min_eu_in_pool = 0;
|
||||
if (info->has_pooled_eu) {
|
||||
if (IS_SS_DISABLED(ss_disable, 0) ||
|
||||
IS_SS_DISABLED(ss_disable, 2))
|
||||
info->min_eu_in_pool = 3;
|
||||
else if (IS_SS_DISABLED(ss_disable, 1))
|
||||
info->min_eu_in_pool = 6;
|
||||
else
|
||||
info->min_eu_in_pool = 9;
|
||||
}
|
||||
#undef IS_SS_DISABLED
|
||||
}
|
||||
}
|
||||
|
||||
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_device_info *info = mkwrite_device_info(dev_priv);
|
||||
const int s_max = 3, ss_max = 3, eu_max = 8;
|
||||
int s, ss;
|
||||
u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
|
||||
|
||||
fuse2 = I915_READ(GEN8_FUSE2);
|
||||
s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
|
||||
ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
|
||||
|
||||
eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
|
||||
eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
|
||||
((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
|
||||
(32 - GEN8_EU_DIS0_S1_SHIFT));
|
||||
eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
|
||||
((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
|
||||
(32 - GEN8_EU_DIS1_S2_SHIFT));
|
||||
|
||||
info->slice_total = hweight32(s_enable);
|
||||
|
||||
/*
|
||||
* The subslice disable field is global, i.e. it applies
|
||||
* to each of the enabled slices.
|
||||
*/
|
||||
info->subslice_per_slice = ss_max - hweight32(ss_disable);
|
||||
info->subslice_total = info->slice_total * info->subslice_per_slice;
|
||||
|
||||
/*
|
||||
* Iterate through enabled slices and subslices to
|
||||
* count the total enabled EU.
|
||||
*/
|
||||
for (s = 0; s < s_max; s++) {
|
||||
if (!(s_enable & (0x1 << s)))
|
||||
/* skip disabled slice */
|
||||
continue;
|
||||
|
||||
for (ss = 0; ss < ss_max; ss++) {
|
||||
u32 n_disabled;
|
||||
|
||||
if (ss_disable & (0x1 << ss))
|
||||
/* skip disabled subslice */
|
||||
continue;
|
||||
|
||||
n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
|
||||
|
||||
/*
|
||||
* Record which subslices have 7 EUs.
|
||||
*/
|
||||
if (eu_max - n_disabled == 7)
|
||||
info->subslice_7eu[s] |= 1 << ss;
|
||||
|
||||
info->eu_total += eu_max - n_disabled;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* BDW is expected to always have a uniform distribution of EU across
|
||||
* subslices with the exception that any one EU in any one subslice may
|
||||
* be fused off for die recovery.
|
||||
*/
|
||||
info->eu_per_subslice = info->subslice_total ?
|
||||
DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
|
||||
|
||||
/*
|
||||
* BDW supports slice power gating on devices with more than
|
||||
* one slice.
|
||||
*/
|
||||
info->has_slice_pg = (info->slice_total > 1);
|
||||
info->has_subslice_pg = 0;
|
||||
info->has_eu_pg = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine various intel_device_info fields at runtime.
|
||||
*
|
||||
* Use it when either:
|
||||
* - it's judged too laborious to fill n static structures with the limit
|
||||
* when a simple if statement does the job,
|
||||
* - run-time checks (eg read fuse/strap registers) are needed.
|
||||
*
|
||||
* This function needs to be called:
|
||||
* - after the MMIO has been setup as we are reading registers,
|
||||
* - after the PCH has been detected,
|
||||
* - before the first usage of the fields it can tweak.
|
||||
*/
|
||||
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_device_info *info = mkwrite_device_info(dev_priv);
|
||||
enum pipe pipe;
|
||||
|
||||
/*
|
||||
* Skylake and Broxton currently don't expose the topmost plane as its
|
||||
* use is exclusive with the legacy cursor and we only want to expose
|
||||
* one of those, not both. Until we can safely expose the topmost plane
|
||||
* as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
|
||||
* we don't expose the topmost plane at all to prevent ABI breakage
|
||||
* down the line.
|
||||
*/
|
||||
if (IS_BROXTON(dev_priv)) {
|
||||
info->num_sprites[PIPE_A] = 2;
|
||||
info->num_sprites[PIPE_B] = 2;
|
||||
info->num_sprites[PIPE_C] = 1;
|
||||
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
info->num_sprites[pipe] = 2;
|
||||
else
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
info->num_sprites[pipe] = 1;
|
||||
|
||||
if (i915.disable_display) {
|
||||
DRM_INFO("Display disabled (module parameter)\n");
|
||||
info->num_pipes = 0;
|
||||
} else if (info->num_pipes > 0 &&
|
||||
(IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
|
||||
HAS_PCH_SPLIT(dev_priv)) {
|
||||
u32 fuse_strap = I915_READ(FUSE_STRAP);
|
||||
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
|
||||
|
||||
/*
|
||||
* SFUSE_STRAP is supposed to have a bit signalling the display
|
||||
* is fused off. Unfortunately it seems that, at least in
|
||||
* certain cases, fused off display means that PCH display
|
||||
* reads don't land anywhere. In that case, we read 0s.
|
||||
*
|
||||
* On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
|
||||
* should be set when taking over after the firmware.
|
||||
*/
|
||||
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
|
||||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
|
||||
(dev_priv->pch_type == PCH_CPT &&
|
||||
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
|
||||
DRM_INFO("Display fused off, disabling\n");
|
||||
info->num_pipes = 0;
|
||||
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
|
||||
DRM_INFO("PipeC fused off\n");
|
||||
info->num_pipes -= 1;
|
||||
}
|
||||
} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
|
||||
u32 dfsm = I915_READ(SKL_DFSM);
|
||||
u8 disabled_mask = 0;
|
||||
bool invalid;
|
||||
int num_bits;
|
||||
|
||||
if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
|
||||
disabled_mask |= BIT(PIPE_A);
|
||||
if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
|
||||
disabled_mask |= BIT(PIPE_B);
|
||||
if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
|
||||
disabled_mask |= BIT(PIPE_C);
|
||||
|
||||
num_bits = hweight8(disabled_mask);
|
||||
|
||||
switch (disabled_mask) {
|
||||
case BIT(PIPE_A):
|
||||
case BIT(PIPE_B):
|
||||
case BIT(PIPE_A) | BIT(PIPE_B):
|
||||
case BIT(PIPE_A) | BIT(PIPE_C):
|
||||
invalid = true;
|
||||
break;
|
||||
default:
|
||||
invalid = false;
|
||||
}
|
||||
|
||||
if (num_bits > info->num_pipes || invalid)
|
||||
DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
|
||||
disabled_mask);
|
||||
else
|
||||
info->num_pipes -= num_bits;
|
||||
}
|
||||
|
||||
/* Initialize slice/subslice/EU info */
|
||||
if (IS_CHERRYVIEW(dev_priv))
|
||||
cherryview_sseu_info_init(dev_priv);
|
||||
else if (IS_BROADWELL(dev_priv))
|
||||
broadwell_sseu_info_init(dev_priv);
|
||||
else if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
gen9_sseu_info_init(dev_priv);
|
||||
|
||||
info->has_snoop = !info->has_llc;
|
||||
|
||||
/* Snooping is broken on BXT A stepping. */
|
||||
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
|
||||
info->has_snoop = false;
|
||||
|
||||
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
|
||||
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
|
||||
DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
|
||||
DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
|
||||
DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
|
||||
DRM_DEBUG_DRIVER("has slice power gating: %s\n",
|
||||
info->has_slice_pg ? "y" : "n");
|
||||
DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
|
||||
info->has_subslice_pg ? "y" : "n");
|
||||
DRM_DEBUG_DRIVER("has EU power gating: %s\n",
|
||||
info->has_eu_pg ? "y" : "n");
|
||||
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
|
|||
|
||||
pipe_config->dp_encoder_is_mst = true;
|
||||
pipe_config->has_pch_encoder = false;
|
||||
pipe_config->has_dp_encoder = true;
|
||||
bpp = 24;
|
||||
/*
|
||||
* for MST we always configure max link bw - the spec doesn't
|
||||
|
@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
|
|||
struct intel_digital_port *intel_dig_port = intel_mst->primary;
|
||||
struct intel_dp *intel_dp = &intel_dig_port->dp;
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum port port = intel_dig_port->port;
|
||||
int ret;
|
||||
uint32_t temp;
|
||||
|
@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
|
|||
struct intel_digital_port *intel_dig_port = intel_mst->primary;
|
||||
struct intel_dp *intel_dp = &intel_dig_port->dp;
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum port port = intel_dig_port->port;
|
||||
int ret;
|
||||
|
||||
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
|
||||
|
||||
if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
|
||||
1))
|
||||
if (intel_wait_for_register(dev_priv,
|
||||
DP_TP_STATUS(port),
|
||||
DP_TP_STATUS_ACT_SENT,
|
||||
DP_TP_STATUS_ACT_SENT,
|
||||
1))
|
||||
DRM_ERROR("Timed out waiting for ACT sent\n");
|
||||
|
||||
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
|
||||
|
@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
|
|||
struct intel_digital_port *intel_dig_port = intel_mst->primary;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
|
||||
u32 temp, flags = 0;
|
||||
|
||||
pipe_config->has_dp_encoder = true;
|
||||
|
||||
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
|
||||
if (temp & TRANS_DDI_PHSYNC)
|
||||
flags |= DRM_MODE_FLAG_PHSYNC;
|
||||
|
@ -336,6 +336,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
|
|||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.set_property = intel_dp_mst_set_property,
|
||||
.atomic_get_property = intel_connector_atomic_get_property,
|
||||
.late_register = intel_connector_register,
|
||||
.early_unregister = intel_connector_unregister,
|
||||
.destroy = intel_dp_mst_connector_destroy,
|
||||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
|
@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
|
|||
{
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
struct drm_device *dev = connector->dev;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
intel_connector_add_to_fbdev(intel_connector);
|
||||
drm_modeset_unlock_all(dev);
|
||||
|
||||
drm_connector_register(&intel_connector->base);
|
||||
}
|
||||
|
||||
|
|
|
@ -168,7 +168,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
|
|||
{
|
||||
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(encoder->base.crtc);
|
||||
enum dpio_channel ch = vlv_dport_to_channel(dport);
|
||||
|
@ -250,7 +250,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
|
|||
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
|
||||
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(encoder->base.crtc);
|
||||
enum dpio_channel ch = vlv_dport_to_channel(dport);
|
||||
|
@ -400,7 +400,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
|
|||
{
|
||||
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(encoder->base.crtc);
|
||||
enum dpio_channel port = vlv_dport_to_channel(dport);
|
||||
|
@ -429,7 +429,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
|
|||
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
|
||||
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
enum dpio_channel port = vlv_dport_to_channel(dport);
|
||||
int pipe = intel_crtc->pipe;
|
||||
|
@ -457,7 +457,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
|
|||
void vlv_phy_reset_lanes(struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(encoder->base.crtc);
|
||||
enum dpio_channel port = vlv_dport_to_channel(dport);
|
||||
|
|
|
@@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll *pll = crtc->config->shared_dpll;

	if (WARN_ON(pll == NULL))

@@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
	unsigned old_mask;

@@ -151,7 +151,7 @@ out:
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);

@@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
		       enum intel_dpll_id range_min,
		       enum intel_dpll_id range_max)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	struct intel_shared_dpll_config *shared_dpll;
	enum intel_dpll_id i;

@@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	/* Make sure no transcoder isn't still depending on us. */

@@ -713,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
		pll = intel_find_shared_dpll(crtc, crtc_state,
					     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);

	} else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
	} else if (encoder->type == INTEL_OUTPUT_DP ||
		   encoder->type == INTEL_OUTPUT_DP_MST ||
		   encoder->type == INTEL_OUTPUT_EDP) {
		enum intel_dpll_id pll_id;

@@ -856,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
	I915_WRITE(regs[pll->id].ctl,
		   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);

	if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5))
	if (intel_wait_for_register(dev_priv,
				    DPLL_STATUS,
				    DPLL_LOCK(pll->id),
				    DPLL_LOCK(pll->id),
				    5))
		DRM_ERROR("DPLL %d not locked\n", pll->id);
}
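This is the first of many conversions from open-coded wait_for() polls to the intel_wait_for_register() helper introduced in this series. Judging by the call sites, its contract is roughly the sketch below (the real helper also tries a short atomic fast path before falling back to a sleeping wait):

	/* Hedged sketch of the helper's semantics, not its implementation:
	 * poll until (I915_READ(reg) & mask) == value, give up after
	 * timeout_ms milliseconds; 0 on success, -ETIMEDOUT on timeout. */
	static inline int wait_for_register_sketch(struct drm_i915_private *dev_priv,
						   i915_reg_t reg, u32 mask, u32 value,
						   unsigned int timeout_ms)
	{
		return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
	}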
@@ -1222,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
			 wrpll_params.central_freq;
	} else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
	} else if (encoder->type == INTEL_OUTPUT_DP ||
		   encoder->type == INTEL_OUTPUT_DP_MST ||
		   encoder->type == INTEL_OUTPUT_EDP) {
		switch (crtc_state->port_clock / 2) {

@@ -1374,8 +1378,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
			PORT_PLL_LOCK), 200))
	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		DRM_ERROR("PLL %d not locked\n", port);

	/*

@@ -1530,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
		clk_div.m2_frac_en = clk_div.m2_frac != 0;

		vco = best_clock.vco;
	} else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
	} else if (encoder->type == INTEL_OUTPUT_DP ||
		   encoder->type == INTEL_OUTPUT_EDP) {
		int i;

@@ -1632,7 +1636,7 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {

static void intel_ddi_pll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_GEN(dev_priv) < 9) {
		uint32_t val = I915_READ(LCPLL_CTL);

@@ -1719,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {

void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;
@@ -69,39 +69,63 @@
})

#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
#define wait_for_us(COND, US) _wait_for((COND), (US), 1)

/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US) ({ \
	unsigned long end__; \
	int ret__ = 0; \
	_WAIT_FOR_ATOMIC_CHECK; \
#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
	int cpu, ret, timeout = (US) * 1000; \
	u64 base; \
	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
	BUILD_BUG_ON((US) > 50000); \
	end__ = (local_clock() >> 10) + (US) + 1; \
	while (!(COND)) { \
		if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
			/* Unlike the regular wait_for(), this atomic variant \
			 * cannot be preempted (and we'll just ignore the issue \
			 * of irq interruptions) and so we know that no time \
			 * has passed since the last check of COND and can \
			 * immediately report the timeout. \
			 */ \
			ret__ = -ETIMEDOUT; \
	if (!(ATOMIC)) { \
		preempt_disable(); \
		cpu = smp_processor_id(); \
	} \
	base = local_clock(); \
	for (;;) { \
		u64 now = local_clock(); \
		if (!(ATOMIC)) \
			preempt_enable(); \
		if (COND) { \
			ret = 0; \
			break; \
		} \
		if (now - base >= timeout) { \
			ret = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
		if (!(ATOMIC)) { \
			preempt_disable(); \
			if (unlikely(cpu != smp_processor_id())) { \
				timeout -= now - base; \
				cpu = smp_processor_id(); \
				base = local_clock(); \
			} \
		} \
	} \
	ret; \
})

#define wait_for_us(COND, US) \
({ \
	int ret__; \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	if ((US) > 10) \
		ret__ = _wait_for((COND), (US), 10); \
	else \
		ret__ = _wait_for_atomic((COND), (US), 0); \
	ret__; \
})

#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000)
#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US))
#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1)
#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1)

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
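After this rework a caller picks a variant by context and timescale instead of open-coding a loop. An illustrative use, mirroring the bxt PLL hunk above rather than taken verbatim from the patch:

	/* wait_for(COND, MS):        sleeping poll, timeout in milliseconds
	 * wait_for_us(COND, US):     timeout in microseconds; busy-spins with
	 *                            preemption enabled when US <= 10
	 * wait_for_atomic(COND, MS): pure busy-wait, safe in atomic context */
	if (wait_for_us(I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK, 200))
		DRM_ERROR("PLL %d not locked\n", port);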
@@ -135,7 +159,7 @@ enum intel_output_type {
	INTEL_OUTPUT_LVDS = 4,
	INTEL_OUTPUT_TVOUT = 5,
	INTEL_OUTPUT_HDMI = 6,
	INTEL_OUTPUT_DISPLAYPORT = 7,
	INTEL_OUTPUT_DP = 7,
	INTEL_OUTPUT_EDP = 8,
	INTEL_OUTPUT_DSI = 9,
	INTEL_OUTPUT_UNKNOWN = 10,

@@ -159,6 +183,7 @@ struct intel_framebuffer {
struct intel_fbdev {
	struct drm_fb_helper helper;
	struct intel_framebuffer *fb;
	async_cookie_t cookie;
	int preferred_bpp;
};

@@ -497,12 +522,10 @@ struct intel_crtc_state {
	 */
	bool limited_color_range;

	/* DP has a bunch of special case unfortunately, so mark the pipe
	 * accordingly. */
	bool has_dp_encoder;

	/* DSI has special cases */
	bool has_dsi_encoder;
	/* Bitmask of encoder types (enum intel_output_type)
	 * driven by the pipe.
	 */
	unsigned int output_types;

	/* Whether we should send NULL infoframes. Required for audio. */
	bool has_hdmi_sink;

@@ -861,6 +884,11 @@ struct intel_dp {
	 * this port. Only relevant on VLV/CHV.
	 */
	enum pipe pps_pipe;
	/*
	 * Set if the sequencer may be reset due to a power transition,
	 * requiring a reinitialization. Only relevant on BXT.
	 */
	bool pps_reset;
	struct edp_power_seq pps_delays;

	bool can_mst; /* this port supports mst */

@@ -957,14 +985,14 @@ vlv_pipe_to_channel(enum pipe pipe)
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	return dev_priv->pipe_to_crtc_mapping[pipe];
}

static inline struct drm_crtc *
intel_get_crtc_for_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	return dev_priv->plane_to_crtc_mapping[plane];
}

@@ -1157,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe);
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
		    enum intel_output_type type)
{
	return crtc_state->output_types & (1 << type);
}
static inline bool
intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_types &
		((1 << INTEL_OUTPUT_DP) |
		 (1 << INTEL_OUTPUT_DP_MST) |
		 (1 << INTEL_OUTPUT_EDP));
}
static inline void
intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
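With has_dp_encoder and has_dsi_encoder folded into the output_types bitmask, per-type checks become bit tests and a pipe can represent several output types at once. A hypothetical caller; the setup_* helpers are invented for illustration, only the intel_crtc_has_*() helpers come from the patch:

	static void example_compute_config(struct intel_crtc_state *crtc_state)
	{
		if (intel_crtc_has_dp_encoder(crtc_state))
			setup_dp_m_n(crtc_state);	/* hypothetical helper */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
			setup_dsi_timings(crtc_state);	/* hypothetical helper */
	}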
@@ -1338,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp);

@@ -1451,6 +1492,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);

/* intel_lvds.c */
void intel_lvds_init(struct drm_device *dev);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);

@@ -1489,7 +1531,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
			      int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
				    u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
int intel_panel_setup_backlight(struct drm_connector *connector,
				enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);

@@ -1498,11 +1541,15 @@ extern struct drm_display_mode *intel_find_panel_downclock(
				struct drm_device *dev,
				struct drm_display_mode *fixed_mode,
				struct drm_connector *connector);
void intel_backlight_register(struct drm_device *dev);

#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
int intel_backlight_device_register(struct intel_connector *connector);
void intel_backlight_device_unregister(struct intel_connector *connector);
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
static int intel_backlight_device_register(struct intel_connector *connector)
{
	return 0;
}
static inline void intel_backlight_device_unregister(struct intel_connector *connector)
{
}
@@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
		LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;

	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
	if (intel_wait_for_register(dev_priv,
				    MIPI_GEN_FIFO_STAT(port), mask, mask,
				    100))
		DRM_ERROR("DPI FIFOs are not empty\n");
}

@@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
{
	struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
	struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dsi_host->port;
	struct mipi_dsi_packet packet;
	ssize_t ret;

@@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,

	/* note: this is never true for reads */
	if (packet.payload_length) {

		if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
		if (intel_wait_for_register(dev_priv,
					    MIPI_GEN_FIFO_STAT(port),
					    data_mask, 0,
					    50))
			DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");

		write_data(dev_priv, data_reg, packet.payload,

@@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
		I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
	}

	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) {
	if (intel_wait_for_register(dev_priv,
				    MIPI_GEN_FIFO_STAT(port),
				    ctrl_mask, 0,
				    50)) {
		DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
	}

@@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
	/* ->rx_len is set only for reads */
	if (msg->rx_len) {
		data_mask = GEN_READ_DATA_AVAIL;
		if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
		if (intel_wait_for_register(dev_priv,
					    MIPI_INTR_STAT(port),
					    data_mask, data_mask,
					    50))
			DRM_ERROR("Timeout waiting for read data.\n");

		read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);

@@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
{
	struct drm_encoder *encoder = &intel_dsi->base.base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	/* XXX: pipe, hs */

@@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
	I915_WRITE(MIPI_DPI_CONTROL(port), cmd);

	mask = SPL_PKT_SENT_INTERRUPT;
	if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
	if (intel_wait_for_register(dev_priv,
				    MIPI_INTR_STAT(port), mask, mask,
				    100))
		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);

	return 0;

@@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
						   base);
	struct intel_connector *intel_connector = intel_dsi->attached_connector;

@@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,

	DRM_DEBUG_KMS("\n");

	pipe_config->has_dsi_encoder = true;

	if (fixed_mode) {
		intel_fixed_panel_mode(fixed_mode, adjusted_mode);

@@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,

static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;
	u32 val;

@@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)

static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;
	u32 val;

@@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_port_enable(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;

@@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;

@@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
static void intel_dsi_enable(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;

@@ -528,7 +538,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum port port;

@@ -602,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
static void intel_dsi_disable(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;
	u32 temp;

@@ -641,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;

@@ -667,8 +677,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
		/* Wait till Clock lanes are in LP-00 state for MIPI Port A
		 * only. MIPI Port C has no similar bit for checking
		 */
		if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
						== 0x00000), 30))
		if (intel_wait_for_register(dev_priv,
					    port_ctrl, AFE_LATCHOUT, 0,
					    30))
			DRM_ERROR("DSI LP not going Low\n");

		/* Disable MIPI PHY transparent latch */

@@ -685,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)

static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);

	DRM_DEBUG_KMS("\n");

@@ -720,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
				   enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	enum intel_display_power_domain power_domain;

@@ -794,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_display_mode *adjusted_mode =
					&pipe_config->base.adjusted_mode;
	struct drm_display_mode *adjusted_mode_sw;

@@ -954,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
	u32 pclk;
	DRM_DEBUG_KMS("\n");

	pipe_config->has_dsi_encoder = true;

	if (IS_BROXTON(dev))
		bxt_dsi_get_pipe_config(encoder, pipe_config);

@@ -1013,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
			    const struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	enum port port;
	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);

@@ -1099,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;

@@ -1390,6 +1399,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dsi_detect,
	.late_register = intel_connector_register,
	.early_unregister = intel_connector_unregister,
	.destroy = intel_dsi_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,

@@ -1420,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev)
	struct intel_connector *intel_connector;
	struct drm_connector *connector;
	struct drm_display_mode *scan, *fixed_mode = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port;
	unsigned int i;

@@ -1587,13 +1597,10 @@ void intel_dsi_init(struct drm_device *dev)
	connector->display_info.height_mm = fixed_mode->height_mm;

	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
	intel_panel_setup_backlight(connector, INVALID_PIPE);

	intel_dsi_add_properties(intel_connector);

	drm_connector_register(connector);

	intel_panel_setup_backlight(connector, INVALID_PIPE);

	return;

err:
@@ -159,7 +159,7 @@ static int dcs_setup_backlight(struct intel_connector *connector,
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
	struct drm_device *dev = intel_connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder = intel_connector->encoder;
	struct intel_panel *panel = &intel_connector->panel;
@@ -303,7 +303,7 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
	struct drm_device *dev = intel_dsi->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 gpio_source, gpio_index;
	bool value;

@@ -469,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel)
	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
	struct drm_device *dev = intel_dsi->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_display_mode *mode;

	if (!panel->connector)

@@ -497,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
	struct drm_device *dev = intel_dsi->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;

@@ -649,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
		);

	/*
	 * Exit zero is unified val ths_zero and ths_exit
	 * Exit zero is unified val ths_zero and ths_exit
	 * minimum value for ths_exit = 110ns
	 * min (exit_zero_cnt * 2) = 110/UI
	 * exit_zero_cnt = 55/UI
	 */
	if (exit_zero_cnt < (55 * ui_den / ui_num))
		if ((55 * ui_den) % ui_num)
			exit_zero_cnt += 1;
	if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
		exit_zero_cnt += 1;

	/* clk zero count */
	clk_zero_cnt = DIV_ROUND_UP(
@@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
			struct intel_crtc_state *config,
			int target_dsi_clk)
{
	unsigned int calc_m = 0, calc_p = 0;
	unsigned int m_min, m_max, p_min = 2, p_max = 6;
	unsigned int m, n, p;
	int ref_clk;
	int delta = target_dsi_clk;
	u32 m_seed;
	unsigned int calc_m, calc_p;
	int delta, ref_clk;

	/* target_dsi_clk is expected in kHz */
	if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {

@@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
		m_max = 92;
	}

	calc_p = p_min;
	calc_m = m_min;
	delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));

	for (m = m_min; m <= m_max && delta; m++) {
		for (p = p_min; p <= p_max && delta; p++) {
			/*

@@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
	}

	/* register has log2(N1), this works fine for powers of two */
	n = ffs(n) - 1;
	m_seed = lfsr_converts[calc_m - 62];
	config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
	config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
		m_seed << DSI_PLL_M1_DIV_SHIFT;
	config->dsi_pll.div =
		(ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
		(u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;

	return 0;
}

@@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
			       struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	int ret;
	u32 dsi_clk;

@@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
	 * PLL lock should deassert within 200us.
	 * Wait up to 1ms before timing out.
	 */
	if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
					& BXT_DSI_PLL_LOCKED) == 0, 1))
	if (intel_wait_for_register(dev_priv,
				    BXT_DSI_PLL_ENABLE,
				    BXT_DSI_PLL_LOCKED,
				    0,
				    1))
		DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}

@@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
	u32 dsi_clk;
	u32 dsi_ratio;
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* Divide by zero */
	if (!pipe_bpp) {

@@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
	u32 temp;
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);

	temp = I915_READ(MIPI_CTRL(port));

@@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
				   const struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;
	u32 dsi_rate = 0;
	u32 pll_ratio = 0;

@@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
			       const struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;
	u32 val;

@@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
	I915_WRITE(BXT_DSI_PLL_ENABLE, val);

	/* Timeout and fail if PLL not locked */
	if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
	if (intel_wait_for_register(dev_priv,
				    BXT_DSI_PLL_ENABLE,
				    BXT_DSI_PLL_LOCKED,
				    BXT_DSI_PLL_LOCKED,
				    1)) {
		DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
		return;
	}

@@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
	u32 tmp;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Clear old configurations */
	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
@@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
	u32 tmp;

@@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
				   enum pipe *pipe)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
	u32 tmp;

@@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
	u32 tmp, flags = 0;

@@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,

static void intel_disable_dvo(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
	i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
	u32 temp = I915_READ(dvo_reg);

@@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)

static void intel_enable_dvo(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;

@@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
static void intel_dvo_pre_enable(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);

@@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)

static int intel_dvo_get_modes(struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	const struct drm_display_mode *fixed_mode =
		to_intel_connector(connector)->panel.fixed_mode;

@@ -341,6 +341,7 @@ static void intel_dvo_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dvo_detect,
	.late_register = intel_connector_register,
	.early_unregister = intel_connector_unregister,
	.destroy = intel_dvo_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,

@@ -378,7 +379,7 @@ static struct drm_display_mode *
intel_dvo_get_current_mode(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
	uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
	struct drm_display_mode *mode = NULL;

@@ -420,7 +421,7 @@ static char intel_dvo_port_name(i915_reg_t dvo_reg)

void intel_dvo_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *intel_encoder;
	struct intel_dvo *intel_dvo;
	struct intel_connector *intel_connector;

@@ -550,7 +551,6 @@ void intel_dvo_init(struct drm_device *dev)
			intel_dvo->panel_wants_dither = true;
		}

		drm_connector_register(connector);
		return;
	}
@@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
	if (intel_wait_for_register(dev_priv,
				    FBC_STATUS, FBC_STAT_COMPRESSING, 0,
				    10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

@@ -390,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;
	struct intel_crtc *crtc = fbc->crtc;
	struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];

	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",

@@ -443,7 +445,7 @@ out:

static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;

@@ -553,7 +555,7 @@ again:

static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

@@ -684,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

@@ -711,7 +713,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 struct intel_crtc_state *crtc_state,
					 struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->base.fb;

@@ -744,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,

static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

@@ -816,22 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)

static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	bool enable_by_default = IS_BROADWELL(dev_priv);

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (i915.enable_fbc < 0 && !enable_by_default) {
		fbc->no_fbc_reason = "disabled per chip default";
		return false;
	}

	if (!i915.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param";
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

@@ -851,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

@@ -884,7 +880,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))

@@ -910,7 +906,7 @@ unlock:

static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_reg_params old_params;

@@ -943,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)

void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))

@@ -992,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)

@@ -1007,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}

@@ -1088,7 +1085,7 @@ void intel_fbc_enable(struct intel_crtc *crtc,
		      struct intel_crtc_state *crtc_state,
		      struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))

@@ -1159,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))

@@ -1213,12 +1210,32 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
	if (!no_fbc_on_multiple_pipes(dev_priv))
		return;

	for_each_intel_crtc(dev_priv->dev, crtc)
	for_each_intel_crtc(&dev_priv->drm, crtc)
		if (intel_crtc_active(&crtc->base) &&
		    to_intel_plane_state(crtc->base.primary->state)->visible)
			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	if (i915.enable_fbc >= 0)
		return !!i915.enable_fbc;

	if (IS_BROADWELL(dev_priv))
		return 1;

	return 0;
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device

@@ -1236,6 +1253,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
	fbc->active = false;
	fbc->work.scheduled = false;

	i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
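The sanitization above collapses the tri-state module parameter into 0 or 1 before anything else reads it. A worked trace of intel_sanitize_fbc_option() under the logic shown in the hunk:

	/* Illustrative trace, not driver code:
	 *   i915.enable_fbc == 0  -> 0 (explicitly off)
	 *   i915.enable_fbc == 1  -> 1 (explicitly on)
	 *   i915.enable_fbc  < 0  -> 1 on Broadwell, 0 elsewhere,
	 *                            so the DDX never observes -1. */
	i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);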
@@ -362,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
				    bool *enabled, int width, int height)
{
	struct drm_device *dev = fb_helper->dev;
	unsigned long conn_configured, mask;
	unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
	int i, j;
	bool *save_enabled;
	bool fallback = true;
	int num_connectors_enabled = 0;
	int num_connectors_detected = 0;
	uint64_t conn_configured = 0, mask;
	int pass = 0;

	save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
			       GFP_KERNEL);
	save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
	if (!save_enabled)
		return false;

	memcpy(save_enabled, enabled, fb_helper->connector_count);
	mask = (1 << fb_helper->connector_count) - 1;
	memcpy(save_enabled, enabled, count);
	mask = BIT(count) - 1;
	conn_configured = 0;
retry:
	for (i = 0; i < fb_helper->connector_count; i++) {
	for (i = 0; i < count; i++) {
		struct drm_fb_helper_connector *fb_conn;
		struct drm_connector *connector;
		struct drm_encoder *encoder;

@@ -388,7 +389,7 @@ retry:
		fb_conn = fb_helper->connector_info[i];
		connector = fb_conn->connector;

		if (conn_configured & (1 << i))
		if (conn_configured & BIT(i))
			continue;

		if (pass == 0 && !connector->has_tile)

@@ -400,7 +401,7 @@ retry:
		if (!enabled[i]) {
			DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
				      connector->name);
			conn_configured |= (1 << i);
			conn_configured |= BIT(i);
			continue;
		}

@@ -419,7 +420,7 @@ retry:
			DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
				      connector->name);
			enabled[i] = false;
			conn_configured |= (1 << i);
			conn_configured |= BIT(i);
			continue;
		}

@@ -432,14 +433,15 @@ retry:
				intel_crtc->lut_b[j] = j;
		}

		new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
		new_crtc = intel_fb_helper_crtc(fb_helper,
						connector->state->crtc);

		/*
		 * Make sure we're not trying to drive multiple connectors
		 * with a single CRTC, since our cloning support may not
		 * match the BIOS.
		 */
		for (j = 0; j < fb_helper->connector_count; j++) {
		for (j = 0; j < count; j++) {
			if (crtcs[j] == new_crtc) {
				DRM_DEBUG_KMS("fallback: cloned configuration\n");
				goto bail;

@@ -498,7 +500,7 @@ retry:
			  modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");

		fallback = false;
		conn_configured |= (1 << i);
		conn_configured |= BIT(i);
	}

	if ((conn_configured & mask) != mask) {

@@ -522,7 +524,7 @@ retry:
	if (fallback) {
bail:
		DRM_DEBUG_KMS("Not using firmware configuration\n");
		memcpy(enabled, save_enabled, fb_helper->connector_count);
		memcpy(enabled, save_enabled, count);
		kfree(save_enabled);
		return false;
	}

@@ -538,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
	.fb_probe = intelfb_create,
};

static void intel_fbdev_destroy(struct drm_device *dev,
				struct intel_fbdev *ifbdev)
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
{
	/* We rely on the object-free to release the VMA pinning for
	 * the info->screen_base mmaping. Leaking the VMA is simpler than

@@ -552,12 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev,
	drm_fb_helper_fini(&ifbdev->helper);

	if (ifbdev->fb) {
		mutex_lock(&dev->struct_mutex);
		mutex_lock(&ifbdev->helper.dev->struct_mutex);
		intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
		mutex_unlock(&dev->struct_mutex);
		mutex_unlock(&ifbdev->helper.dev->struct_mutex);

		drm_framebuffer_remove(&ifbdev->fb->base);
	}

	kfree(ifbdev);
}

/*

@@ -690,9 +693,9 @@ out:

static void intel_fbdev_suspend_worker(struct work_struct *work)
{
	intel_fbdev_set_suspend(container_of(work,
					     struct drm_i915_private,
					     fbdev_suspend_work)->dev,
	intel_fbdev_set_suspend(&container_of(work,
					      struct drm_i915_private,
					      fbdev_suspend_work)->drm,
				FBINFO_STATE_RUNNING,
				true);
}

@@ -700,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
{
	struct intel_fbdev *ifbdev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))

@@ -732,38 +735,50 @@ int intel_fbdev_init(struct drm_device *dev)

static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_fbdev *ifbdev = dev_priv->fbdev;
	struct intel_fbdev *ifbdev = data;

	/* Due to peculiar init order wrt to hpd handling this is separate. */
	if (drm_fb_helper_initial_config(&ifbdev->helper,
					 ifbdev->preferred_bpp))
		intel_fbdev_fini(dev_priv->dev);
		intel_fbdev_fini(ifbdev->helper.dev);
}

void intel_fbdev_initial_config_async(struct drm_device *dev)
{
	async_schedule(intel_fbdev_initial_config, to_i915(dev));
	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;

	ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}

static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
{
	if (!ifbdev->cookie)
		return;

	/* Only serialises with all preceding async calls, hence +1 */
	async_synchronize_cookie(ifbdev->cookie + 1);
	ifbdev->cookie = 0;
}

void intel_fbdev_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (!dev_priv->fbdev)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_fbdev *ifbdev = dev_priv->fbdev;

	if (!ifbdev)
		return;

	flush_work(&dev_priv->fbdev_suspend_work);

	if (!current_is_async())
		async_synchronize_full();
	intel_fbdev_destroy(dev, dev_priv->fbdev);
	kfree(dev_priv->fbdev);
	intel_fbdev_sync(ifbdev);

	intel_fbdev_destroy(ifbdev);
	dev_priv->fbdev = NULL;
}

void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_fbdev *ifbdev = dev_priv->fbdev;
	struct fb_info *info;
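intel_fbdev_sync() leans on the kernel async API's cookie ordering: async_synchronize_cookie(n) waits for entries scheduled before cookie n, exclusive of n itself, hence the '+1' in the hunk above. The pattern in isolation (my_async_fn is a made-up example):

	async_cookie_t cookie = async_schedule(my_async_fn, data);
	/* ... */
	async_synchronize_cookie(cookie + 1);	/* my_async_fn has now run */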
@@ -812,7 +827,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous

void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	if (dev_priv->fbdev)
		drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}

@@ -820,13 +835,15 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
void intel_fbdev_restore_mode(struct drm_device *dev)
{
	int ret;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_fbdev *ifbdev = dev_priv->fbdev;
	struct drm_fb_helper *fb_helper;

	if (!ifbdev)
		return;

	intel_fbdev_sync(ifbdev);

	fb_helper = &ifbdev->helper;

	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -50,7 +50,7 @@

 static bool ivb_can_enable_err_int(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc;
 	enum pipe pipe;

@@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)

 static bool cpt_can_enable_serr_int(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 	struct intel_crtc *crtc;

@@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0xffff0000;

@@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

@@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	if (enable) {
 		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

@@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (enable)
 		bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
@@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

@@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (enable) {
 		I915_WRITE(SERR_INT,
@@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	bool old;

@@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 	bool ret;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+	ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
						      enable);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

@@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 	intel_crtc->pch_fifo_underrun_disabled = !enable;

 	if (HAS_PCH_IBX(dev_priv))
-		ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+		ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+						pch_transcoder,
						enable);
 	else
-		cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+		cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+						pch_transcoder,
						enable, old);

 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

@@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)

 	spin_lock_irq(&dev_priv->irq_lock);

-	for_each_intel_crtc(dev_priv->dev, crtc) {
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		if (crtc->cpu_fifo_underrun_disabled)
			continue;

@@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)

 	spin_lock_irq(&dev_priv->irq_lock);

-	for_each_intel_crtc(dev_priv->dev, crtc) {
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		if (crtc->pch_fifo_underrun_disabled)
			continue;

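The dev->dev_private and dev_priv->dev conversions that dominate these hunks come from the demidlayering work: struct drm_device is now embedded inside struct drm_i915_private, so to_i915() and &dev_priv->drm are constant-offset arithmetic instead of a stored back-pointer chase. Below is a minimal userspace sketch of that pattern; the struct members and values are placeholders, not the driver's real layout:

#include <stddef.h>
#include <stdio.h>

struct drm_device { int dummy; };			/* stand-in */

struct drm_i915_private {
	struct drm_device drm;	/* embedded, not separately allocated */
	int example_field;	/* placeholder member */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* to_i915(): recover the containing private struct from &i915->drm. */
static inline struct drm_i915_private *to_i915(struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

int main(void)
{
	struct drm_i915_private i915 = { .example_field = 42 };
	struct drm_device *dev = &i915.drm;

	printf("%d\n", to_i915(dev)->example_field);	/* prints 42 */
	return 0;
}
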
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -26,6 +26,7 @@

 #include "intel_guc_fwif.h"
 #include "i915_guc_reg.h"
 #include "intel_ringbuffer.h"

+struct drm_i915_gem_request;

@@ -86,7 +87,7 @@ struct i915_guc_client {
 	int retcode;

 	/* Per-engine counts of GuC submissions */
-	uint64_t submissions[GUC_MAX_ENGINES_NUM];
+	uint64_t submissions[I915_NUM_ENGINES];
 };

 enum intel_guc_fw_status {

@@ -143,8 +144,8 @@ struct intel_guc {
 	uint32_t action_fail;		/* Total number of failures	*/
 	int32_t action_err;		/* Last error code		*/

-	uint64_t submissions[GUC_MAX_ENGINES_NUM];
-	uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
+	uint64_t submissions[I915_NUM_ENGINES];
+	uint32_t last_seqno[I915_NUM_ENGINES];
 };

 /* intel_guc_loader.c */

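Sizing these per-engine arrays by I915_NUM_ENGINES rather than GUC_MAX_ENGINES_NUM ties them to the driver's actual engine count instead of the firmware interface's ceiling. A compilable sketch of the idea; the GUC_MAX_ENGINES_NUM value and the compile-time guard are assumptions for illustration, not copied from the real headers:

#include <stdint.h>
#include <stdio.h>

/* Driver-side engine ids; the count is what the arrays are sized by. */
enum intel_engine_id { RCS = 0, BCS, VCS, VCS2, VECS, I915_NUM_ENGINES };

/* Hypothetical firmware-side ceiling; the real value lives in intel_guc_fwif.h. */
#define GUC_MAX_ENGINES_NUM 8

/* Compile-time guard: the build fails if the driver outgrows the GuC ABI. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct guc_stats {
	uint64_t submissions[I915_NUM_ENGINES];	/* indexed by engine id */
	uint32_t last_seqno[I915_NUM_ENGINES];
};

int main(void)
{
	BUILD_BUG_ON(I915_NUM_ENGINES > GUC_MAX_ENGINES_NUM);
	printf("per-client stats: %zu bytes\n", sizeof(struct guc_stats));
	return 0;
}
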
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -65,6 +65,9 @@ MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 #define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
 MODULE_FIRMWARE(I915_BXT_GUC_UCODE);

+#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
+MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
+
 /* User-friendly representation of an enum */
 const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 {
@@ -87,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *engine;
 	int irqs;

-	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
+	/* tell all command streamers NOT to forward interrupts or vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
 	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
 	for_each_engine(engine, dev_priv)
@@ -105,9 +108,8 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 	int irqs;
 	u32 tmp;

-	/* tell all command streamers to forward interrupts and vblank to GuC */
-	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
-	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
+	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
 	for_each_engine(engine, dev_priv)
 		I915_WRITE(RING_MODE_GEN7(engine), irqs);

@@ -312,7 +314,7 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
 static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;

 	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -411,7 +413,7 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
 */
 int intel_guc_setup(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	const char *fw_path = guc_fw->guc_fw_path;
 	int retries, ret, err;
@@ -606,7 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)

 	/* Header and uCode will be loaded to WOPCM. Size of the two. */
 	size = guc_fw->header_size + guc_fw->ucode_size;
-	if (size > guc_wopcm_size(dev->dev_private)) {
+	if (size > guc_wopcm_size(to_i915(dev))) {
 		DRM_ERROR("Firmware is too large to fit in WOPCM\n");
 		goto fail;
 	}
@@ -679,7 +681,7 @@ fail:
 */
 void intel_guc_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	const char *fw_path;

@@ -699,6 +701,10 @@ void intel_guc_init(struct drm_device *dev)
 		fw_path = I915_BXT_GUC_UCODE;
 		guc_fw->guc_fw_major_wanted = 8;
 		guc_fw->guc_fw_minor_wanted = 7;
+	} else if (IS_KABYLAKE(dev)) {
+		fw_path = I915_KBL_GUC_UCODE;
+		guc_fw->guc_fw_major_wanted = 9;
+		guc_fw->guc_fw_minor_wanted = 14;
 	} else {
 		fw_path = "";	/* unknown device */
 	}
@@ -728,7 +734,7 @@ void intel_guc_init(struct drm_device *dev)
 */
 void intel_guc_fini(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;

 	mutex_lock(&dev->struct_mutex);
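The Kabylake hunks extend what is effectively a per-platform firmware table: each platform names a blob under /lib/firmware plus the exact major.minor version the loader will accept. A table-driven sketch of that selection step; the platform enum stands in for the IS_BROXTON()/IS_KABYLAKE() macros, and only the bxt/kbl entries are taken from the diff:

#include <stdio.h>
#include <string.h>

enum platform { BROXTON, KABYLAKE, UNKNOWN };	/* stand-in for IS_*() checks */

struct guc_fw_blob {
	enum platform platform;
	const char *path;	/* blob name under /lib/firmware */
	int major, minor;	/* version the driver insists on */
};

static const struct guc_fw_blob blobs[] = {
	{ BROXTON,  "i915/bxt_guc_ver8_7.bin",  8,  7 },
	{ KABYLAKE, "i915/kbl_guc_ver9_14.bin", 9, 14 },
};

static const struct guc_fw_blob *pick_blob(enum platform p)
{
	for (size_t i = 0; i < sizeof(blobs) / sizeof(blobs[0]); i++)
		if (blobs[i].platform == p)
			return &blobs[i];
	return NULL;	/* unknown device: GuC loading is skipped */
}

int main(void)
{
	const struct guc_fw_blob *fw = pick_blob(KABYLAKE);

	if (fw)
		printf("%s (want %d.%d)\n", fw->path, fw->major, fw->minor);
	return 0;
}
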
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -63,7 +63,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)

 	if (!is_supported_device(dev_priv)) {
 		DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
-		return 0;
+		goto bail;
 	}

 	/*
@@ -72,16 +72,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 	ret = intel_gvt_init_host();
 	if (ret) {
 		DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
-		return 0;
+		goto bail;
 	}

 	ret = intel_gvt_init_device(dev_priv);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Fail to init GVT device\n");
-		return 0;
+		goto bail;
 	}

 	return 0;
+
+bail:
+	i915.enable_gvt = 0;
+	return 0;
 }

 /**
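This hunk converts every soft-failure "return 0" into "goto bail", so all such paths also clear i915.enable_gvt before reporting success; later code can then trust the module parameter to reflect whether GVT-g actually came up. A compilable sketch of the pattern, with stub functions standing in for the real init calls:

#include <stdio.h>

static int enable_gvt = 1;	/* mirrors the i915.enable_gvt parameter */

static int init_host(void) { return -1; /* pretend MPT modules are missing */ }

/*
 * Optional feature: every failure funnels through "bail", which turns the
 * feature off but still returns 0 so the rest of the driver keeps loading.
 */
static int gvt_init_sketch(void)
{
	if (!enable_gvt)
		return 0;

	if (init_host()) {
		fprintf(stderr, "Not in host or MPT modules not found\n");
		goto bail;
	}

	return 0;

bail:
	enable_gvt = 0;	/* record the decision for later queries */
	return 0;
}

int main(void)
{
	gvt_init_sketch();
	printf("enable_gvt = %d\n", enable_gvt);	/* prints 0 */
	return 0;
}
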
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -47,7 +47,7 @@ static void
 assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t enabled_bits;

 	enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val = I915_READ(VIDEO_DIP_CTL);
 	int i;

@@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
			       bool enable,
			       const struct drm_display_mode *adjusted_mode)
 {
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = VIDEO_DIP_CTL;
@@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp,

 static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
 	i915_reg_t reg;
 	u32 val = 0;
@@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
			       bool enable,
			       const struct drm_display_mode *adjusted_mode)
 {
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
			       bool enable,
			       const struct drm_display_mode *adjusted_mode)
 {
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
			       bool enable,
			       const struct drm_display_mode *adjusted_mode)
 {
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
			       bool enable,
			       const struct drm_display_mode *adjusted_mode)
 {
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
@@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
 static void intel_hdmi_prepare(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
				    enum pipe *pipe)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	enum intel_display_power_domain power_domain;
 	u32 tmp;
@@ -931,7 +931,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp, flags = 0;
 	int dotclock;

@@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
 static void g4x_enable_hdmi(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
@@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
 static void ibx_enable_hdmi(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
@@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
 static void cpt_enable_hdmi(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	enum pipe pipe = crtc->pipe;
@@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder)
 static void intel_disable_hdmi(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	u32 temp;
@@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);

-		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+		intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
@@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc_state->base.crtc->dev;
-	struct drm_atomic_state *state;
-	struct intel_encoder *encoder;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int count = 0, count_hdmi = 0;
-	int i;

 	if (HAS_GMCH_DISPLAY(dev))
 		return false;

-	state = crtc_state->base.state;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != crtc_state->base.crtc)
-			continue;
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
-
-		count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
-		count++;
-	}
-
 	/*
	 * HDMI 12bpc affects the clocks, so it's only possible
	 * when not cloning with other encoder types.
	 */
-	return count_hdmi > 0 && count_hdmi == count;
+	return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
 }

 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct intel_digital_port *intel_dig_port =
		hdmi_to_dig_port(intel_hdmi);
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	int ret;

 	ret = drm_object_property_set_value(&connector->base, property, val);
@@ -1674,7 +1656,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &dport->hdmi;
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1722,7 +1704,7 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 static void chv_hdmi_post_disable(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	mutex_lock(&dev_priv->sb_lock);

@@ -1737,7 +1719,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &dport->hdmi;
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1774,6 +1756,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_hdmi_set_property,
 	.atomic_get_property = intel_connector_atomic_get_property,
+	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
 	.destroy = intel_hdmi_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -1806,7 +1789,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
 	uint8_t alternate_ddc_pin;

@@ -1914,7 +1897,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_hdmi_add_properties(intel_hdmi, connector);

 	intel_connector_attach_encoder(intel_connector, intel_encoder);
-	drm_connector_register(connector);
 	intel_hdmi->attached_connector = intel_connector;

 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
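The hdmi_12bpc_possible() rewrite above is the payoff of tracking outputs in the crtc state: once output_types is a bitmask filled in during atomic check, "am I driving HDMI and nothing else?" collapses to one equality test instead of a connector walk. A userspace model of that check; the enum values are illustrative, not the driver's real ones:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative subset of the output types; real values live in intel_drv.h. */
enum intel_output_type {
	INTEL_OUTPUT_LVDS = 4,
	INTEL_OUTPUT_HDMI = 6,
	INTEL_OUTPUT_DP = 7,
};

#define BIT(n) (1u << (n))

struct intel_crtc_state {
	unsigned int output_types;	/* bitmask filled during atomic check */
};

/* One equality test replaces walking every connector in the atomic state. */
static bool hdmi_only(const struct intel_crtc_state *state)
{
	return state->output_types == BIT(INTEL_OUTPUT_HDMI);
}

int main(void)
{
	struct intel_crtc_state solo = { .output_types = BIT(INTEL_OUTPUT_HDMI) };
	struct intel_crtc_state cloned = {
		.output_types = BIT(INTEL_OUTPUT_HDMI) | BIT(INTEL_OUTPUT_DP),
	};

	printf("%d %d\n", hdmi_only(&solo), hdmi_only(&cloned));	/* 1 0 */
	return 0;
}
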
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,

 static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_connector *intel_connector;
 	struct intel_encoder *intel_encoder;
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug.reenable_work.work);
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	int i;

@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_connector *intel_connector;
 	struct intel_encoder *intel_encoder;
@@ -455,7 +455,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 */
 void intel_hpd_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
 	int i;
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
 void
 intel_i2c_reset(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	I915_WRITE(GMBUS0, 0);
 	I915_WRITE(GMBUS4, 0);
@@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
 static u32 get_reserved(struct intel_gmbus *bus)
 {
 	struct drm_i915_private *dev_priv = bus->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	u32 reserved = 0;

 	/* On most chips, these bits must be preserved in software. */
@@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
					       adapter);
 	struct drm_i915_private *dev_priv = bus->dev_priv;

-	intel_i2c_reset(dev_priv->dev);
+	intel_i2c_reset(&dev_priv->drm);
 	intel_i2c_quirk_set(dev_priv, true);
 	set_data(bus, 1);
 	set_clock(bus, 1);
@@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
 {
 	int ret;

-#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
-
 	if (!HAS_GMBUS_IRQ(dev_priv))
-		return wait_for(C, 10);
+		return intel_wait_for_register(dev_priv,
+					       GMBUS2, GMBUS_ACTIVE, 0,
+					       10);

 	/* Important: The hw handles only the first bit, so set only one! */
 	I915_WRITE(GMBUS4, GMBUS_IDLE_EN);

-	ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+	ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
+				 (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
				 msecs_to_jiffies_timeout(10));

 	I915_WRITE(GMBUS4, 0);
@@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
		return 0;
 	else
		return -ETIMEDOUT;
-#undef C
 }

 static int
@@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
 */
 int intel_setup_gmbus(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_gmbus *bus;
 	unsigned int pin;
 	int ret;
@@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
		goto err;
 	}

-	intel_i2c_reset(dev_priv->dev);
+	intel_i2c_reset(&dev_priv->drm);

 	return 0;

@@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)

 void intel_teardown_gmbus(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_gmbus *bus;
 	unsigned int pin;

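intel_wait_for_register(), which replaces the "#define C" polling macro in the gmbus hunk and several wait_for() calls elsewhere in this merge, has one contract: poll a register until (read & mask) == value, or give up after a timeout. A userspace model of that contract; the fake register, bit value, and poll budget are stand-ins, and the real helper also sleeps between reads and takes its timeout in milliseconds:

#include <stdint.h>
#include <stdio.h>

#define GMBUS_ACTIVE (1u << 9)	/* illustrative; the real bit is in i915_reg.h */

/* Fake MMIO: the busy bit deasserts after a couple of reads. */
static uint32_t read_gmbus2(void)
{
	static int reads;
	return ++reads < 3 ? GMBUS_ACTIVE : 0;
}

static int wait_for_register(uint32_t (*read)(void), uint32_t mask,
			     uint32_t value, int max_polls)
{
	while (max_polls--) {
		if ((read() & mask) == value)
			return 0;
	}
	return -1;	/* -ETIMEDOUT in the kernel */
}

int main(void)
{
	/* Same shape as the gmbus_wait_idle() call in the diff above. */
	int ret = wait_for_register(read_gmbus2, GMBUS_ACTIVE, 0, 10);

	printf("idle wait: %s\n", ret ? "timed out" : "ok");
	return 0;
}
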
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -789,9 +789,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	intel_logical_ring_advance(ringbuf);

-	if (intel_engine_stopped(engine))
-		return 0;
-
 	/* We keep the previous context alive until we retire the following
	 * request. This ensures that any the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
@@ -826,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 {
 	struct drm_device *dev = params->dev;
 	struct intel_engine_cs *engine = params->engine;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
 	u64 exec_start;
 	int instp_mode;
@@ -902,7 +899,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct drm_i915_gem_request *req, *tmp;
 	LIST_HEAD(cancel_list);

-	WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));

 	spin_lock_bh(&engine->execlist_lock);
 	list_replace_init(&engine->execlist_queue, &cancel_list);
@@ -929,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)

 	/* TODO: Is this correct with Execlists enabled? */
 	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-	if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+	if (intel_wait_for_register(dev_priv,
+				    RING_MI_MODE(engine->mmio_base),
+				    MODE_IDLE, MODE_IDLE,
+				    1000)) {
		DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
		return;
 	}
@@ -961,7 +961,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	u32 *lrc_reg_state;
 	int ret;

-	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

 	if (ce->pin_count++)
		return 0;
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 {
 	struct intel_context *ce = &ctx->engine[engine->id];

-	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(ce->pin_count == 0);

 	if (--ce->pin_count)
@@ -1296,6 +1296,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
 	}
+
+	/* WaMediaPoolStateCmdInWABB:bxt */
+	if (HAS_POOLED_EU(engine->i915)) {
+		/*
+		 * EU pool configuration is setup along with golden context
+		 * during context initialization. This value depends on
+		 * device type (2x6 or 3x6) and needs to be updated based
+		 * on which subslice is disabled especially for 2x6
+		 * devices, however it is safe to load default
+		 * configuration of 3x6 device instead of masking off
+		 * corresponding bits because HW ignores bits of a disabled
+		 * subslice and drops down to appropriate config. Please
+		 * see render_state_setup() in i915_gem_render_state.c for
+		 * possible configurations, to avoid duplication they are
+		 * not shown here again.
+		 */
+		u32 eu_pool_config = 0x00777000;
+		wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
+		wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
+		wa_ctx_emit(batch, index, eu_pool_config);
+		wa_ctx_emit(batch, index, 0);
+		wa_ctx_emit(batch, index, 0);
+		wa_ctx_emit(batch, index, 0);
+	}
+
 	/* Pad to end of cacheline */
 	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);
@@ -1353,8 +1378,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
 	int ret;

-	engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
-						    PAGE_ALIGN(size));
+	engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
+						    PAGE_ALIGN(size));
 	if (IS_ERR(engine->wa_ctx.obj)) {
		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
		ret = PTR_ERR(engine->wa_ctx.obj);
@@ -1614,36 +1639,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	return 0;
 }

-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		I915_WRITE_IMR(engine,
-			       ~(engine->irq_enable_mask | engine->irq_keep_mask));
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	I915_WRITE_IMR(engine,
+		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }

-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }

 static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1780,16 +1787,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	return 0;
 }

-static u32 gen8_get_seqno(struct intel_engine_cs *engine)
-{
-	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 {
 	/*
@@ -1805,14 +1802,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 	intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }

-static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-
-	/* See bxt_a_get_seqno() explaining the reason for the clflush. */
-	intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 /*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
@@ -1838,7 +1827,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
				intel_hws_seqno_address(request->engine) |
				MI_FLUSH_DW_USE_GTT);
 	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	intel_logical_ring_emit(ringbuf, request->seqno);
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
@@ -1958,6 +1947,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	i915_cmd_parser_fini_ring(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);

+	intel_engine_fini_breadcrumbs(engine);
+
 	if (engine->status_page.obj) {
		i915_gem_object_unpin_map(engine->status_page.obj);
		engine->status_page.obj = NULL;
@@ -1979,15 +1970,11 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->init_hw = gen8_init_common_ring;
 	engine->emit_request = gen8_emit_request;
 	engine->emit_flush = gen8_emit_flush;
-	engine->irq_get = gen8_logical_ring_get_irq;
-	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->irq_enable = gen8_logical_ring_enable_irq;
+	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;
-	engine->get_seqno = gen8_get_seqno;
-	engine->set_seqno = gen8_set_seqno;
-	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
-		engine->set_seqno = bxt_a_set_seqno;
-	}
 }

 static inline void
@@ -1995,7 +1982,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
 {
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
-	init_waitqueue_head(&engine->irq_queue);
 }

 static int
@@ -2016,12 +2002,94 @@ lrc_setup_hws(struct intel_engine_cs *engine,
 	return 0;
 }

+static int
+logical_ring_init(struct intel_engine_cs *engine)
+{
+	struct i915_gem_context *dctx = engine->i915->kernel_context;
+	int ret;
+
+	ret = intel_engine_init_breadcrumbs(engine);
+	if (ret)
+		goto error;
+
+	ret = i915_cmd_parser_init_ring(engine);
+	if (ret)
+		goto error;
+
+	ret = execlists_context_deferred_alloc(dctx, engine);
+	if (ret)
+		goto error;
+
+	/* As this is the default context, always pin it */
+	ret = intel_lr_context_pin(dctx, engine);
+	if (ret) {
+		DRM_ERROR("Failed to pin context for %s: %d\n",
+			  engine->name, ret);
+		goto error;
+	}
+
+	/* And setup the hardware status page. */
+	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+	if (ret) {
+		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	intel_logical_ring_cleanup(engine);
+	return ret;
+}
+
+static int logical_render_ring_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	if (HAS_L3_DPF(dev_priv))
+		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+	/* Override some for render ring. */
+	if (INTEL_GEN(dev_priv) >= 9)
+		engine->init_hw = gen9_init_render_ring;
+	else
+		engine->init_hw = gen8_init_render_ring;
+	engine->init_context = gen8_init_rcs_context;
+	engine->cleanup = intel_fini_pipe_control;
+	engine->emit_flush = gen8_emit_flush_render;
+	engine->emit_request = gen8_emit_request_render;
+
+	ret = intel_init_pipe_control(engine, 4096);
+	if (ret)
+		return ret;
+
+	ret = intel_init_workaround_bb(engine);
+	if (ret) {
+		/*
+		 * We continue even if we fail to initialize WA batch
+		 * because we only expect rare glitches but nothing
+		 * critical to prevent us from using GPU
+		 */
+		DRM_ERROR("WA batch buffer initialization failed: %d\n",
+			  ret);
+	}
+
+	ret = logical_ring_init(engine);
+	if (ret) {
+		lrc_destroy_wa_ctx_obj(engine);
+	}
+
+	return ret;
+}
+
 static const struct logical_ring_info {
 	const char *name;
 	unsigned exec_id;
 	unsigned guc_id;
 	u32 mmio_base;
 	unsigned irq_shift;
+	int (*init)(struct intel_engine_cs *engine);
 } logical_rings[] = {
 	[RCS] = {
		.name = "render ring",
@@ -2029,6 +2097,7 @@ static const struct logical_ring_info {
		.guc_id = GUC_RENDER_ENGINE,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
+		.init = logical_render_ring_init,
 	},
 	[BCS] = {
		.name = "blitter ring",
@@ -2036,6 +2105,7 @@ static const struct logical_ring_info {
		.guc_id = GUC_BLITTER_ENGINE,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
+		.init = logical_ring_init,
 	},
 	[VCS] = {
		.name = "bsd ring",
@@ -2043,6 +2113,7 @@ static const struct logical_ring_info {
		.guc_id = GUC_VIDEO_ENGINE,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
+		.init = logical_ring_init,
 	},
 	[VCS2] = {
		.name = "bsd2 ring",
@@ -2050,6 +2121,7 @@ static const struct logical_ring_info {
		.guc_id = GUC_VIDEO_ENGINE2,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
+		.init = logical_ring_init,
 	},
 	[VECS] = {
		.name = "video enhancement ring",
@@ -2057,14 +2129,14 @@ static const struct logical_ring_info {
		.guc_id = GUC_VIDEOENHANCE_ENGINE,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
+		.init = logical_ring_init,
 	},
 };

 static struct intel_engine_cs *
-logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
+logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
 {
 	const struct logical_ring_info *info = &logical_rings[id];
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine = &dev_priv->engine[id];
 	enum forcewake_domains fw_domains;

@@ -2107,169 +2179,62 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 	logical_ring_default_irqs(engine, info->irq_shift);

 	intel_engine_init_hangcheck(engine);
-	i915_gem_batch_pool_init(dev, &engine->batch_pool);
+	i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);

 	return engine;
 }

-static int
-logical_ring_init(struct intel_engine_cs *engine)
-{
-	struct i915_gem_context *dctx = engine->i915->kernel_context;
-	int ret;
-
-	ret = i915_cmd_parser_init_ring(engine);
-	if (ret)
-		goto error;
-
-	ret = execlists_context_deferred_alloc(dctx, engine);
-	if (ret)
-		goto error;
-
-	/* As this is the default context, always pin it */
-	ret = intel_lr_context_pin(dctx, engine);
-	if (ret) {
-		DRM_ERROR("Failed to pin context for %s: %d\n",
-			  engine->name, ret);
-		goto error;
-	}
-
-	/* And setup the hardware status page. */
-	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
-	if (ret) {
-		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
-		goto error;
-	}
-
-	return 0;
-
-error:
-	intel_logical_ring_cleanup(engine);
-	return ret;
-}
-
-static int logical_render_ring_init(struct drm_device *dev)
-{
-	struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
-	int ret;
-
-	if (HAS_L3_DPF(dev))
-		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
-	/* Override some for render ring. */
-	if (INTEL_INFO(dev)->gen >= 9)
-		engine->init_hw = gen9_init_render_ring;
-	else
-		engine->init_hw = gen8_init_render_ring;
-	engine->init_context = gen8_init_rcs_context;
-	engine->cleanup = intel_fini_pipe_control;
-	engine->emit_flush = gen8_emit_flush_render;
-	engine->emit_request = gen8_emit_request_render;
-
-	ret = intel_init_pipe_control(engine);
-	if (ret)
-		return ret;
-
-	ret = intel_init_workaround_bb(engine);
-	if (ret) {
-		/*
-		 * We continue even if we fail to initialize WA batch
-		 * because we only expect rare glitches but nothing
-		 * critical to prevent us from using GPU
-		 */
-		DRM_ERROR("WA batch buffer initialization failed: %d\n",
-			  ret);
-	}
-
-	ret = logical_ring_init(engine);
-	if (ret) {
-		lrc_destroy_wa_ctx_obj(engine);
-	}
-
-	return ret;
-}
-
-static int logical_bsd_ring_init(struct drm_device *dev)
-{
-	struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
-
-	return logical_ring_init(engine);
-}
-
-static int logical_bsd2_ring_init(struct drm_device *dev)
-{
-	struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
-
-	return logical_ring_init(engine);
-}
-
-static int logical_blt_ring_init(struct drm_device *dev)
-{
-	struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
-
-	return logical_ring_init(engine);
-}
-
-static int logical_vebox_ring_init(struct drm_device *dev)
-{
-	struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
-
-	return logical_ring_init(engine);
-}
-
 /**
 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
 * @dev: DRM device.
 *
- * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
- * those engines that are present in the hardware.
+ * This function inits the engines for an Execlists submission style (the
+ * equivalent in the legacy ringbuffer submission world would be
+ * i915_gem_init_engines). It does it only for those engines that are present in
+ * the hardware.
 *
 * Return: non-zero if the initialization failed.
 */
 int intel_logical_rings_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	unsigned int mask = 0;
+	unsigned int i;
 	int ret;

-	ret = logical_render_ring_init(dev);
-	if (ret)
-		return ret;
+	WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

-	if (HAS_BSD(dev)) {
-		ret = logical_bsd_ring_init(dev);
+	for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
+		if (!HAS_ENGINE(dev_priv, i))
+			continue;
+
+		if (!logical_rings[i].init)
+			continue;
+
+		ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
		if (ret)
-			goto cleanup_render_ring;
+			goto cleanup;
+
+		mask |= ENGINE_MASK(i);
 	}

-	if (HAS_BLT(dev)) {
-		ret = logical_blt_ring_init(dev);
-		if (ret)
-			goto cleanup_bsd_ring;
-	}
-
-	if (HAS_VEBOX(dev)) {
-		ret = logical_vebox_ring_init(dev);
-		if (ret)
-			goto cleanup_blt_ring;
-	}
-
-	if (HAS_BSD2(dev)) {
-		ret = logical_bsd2_ring_init(dev);
-		if (ret)
-			goto cleanup_vebox_ring;
+	/*
+	 * Catch failures to update logical_rings table when the new engines
+	 * are added to the driver by a warning and disabling the forgotten
+	 * engines.
+	 */
+	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
+		struct intel_device_info *info =
+			(struct intel_device_info *)&dev_priv->info;
+		info->ring_mask = mask;
 	}

 	return 0;

-cleanup_vebox_ring:
-	intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
-	intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
-	intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
-cleanup_render_ring:
-	intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
+cleanup:
+	for (i = 0; i < I915_NUM_ENGINES; i++)
+		intel_logical_ring_cleanup(&dev_priv->engine[i]);

 	return ret;
 }
@@ -2546,7 +2511,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

-	ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
+	ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
 	if (IS_ERR(ctx_obj)) {
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
		return PTR_ERR(ctx_obj);

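The intel_logical_rings_init() rewrite above swaps a per-engine HAS_BSD()/HAS_BLT()/... ladder for a table walk keyed by the hardware's ring mask, accumulating a mask of engines that actually came up and warning on any mismatch. A compilable sketch of that shape; the engine ids, mask, and init stub are illustrative:

#include <stdio.h>

enum engine_id { RCS, BCS, VCS, VCS2, VECS, NUM_ENGINES };

#define ENGINE_MASK(id) (1u << (id))

/* Pretend this part lacks a second BSD ring. */
static const unsigned int hw_ring_mask =
	ENGINE_MASK(RCS) | ENGINE_MASK(BCS) | ENGINE_MASK(VCS) | ENGINE_MASK(VECS);

static int init_engine(int id) { printf("init engine %d\n", id); return 0; }

int main(void)
{
	unsigned int mask = 0;
	int id;

	/* Walk the table, skip absent engines, record what initialised. */
	for (id = 0; id < NUM_ENGINES; id++) {
		if (!(hw_ring_mask & ENGINE_MASK(id)))
			continue;
		if (init_engine(id))
			break;
		mask |= ENGINE_MASK(id);
	}

	/* A mismatch here would mean the table forgot a new engine. */
	printf("initialised %#x, expected %#x\n", mask, hw_ring_mask);
	return 0;
}
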
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
				    enum pipe *pipe)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	enum intel_display_power_domain power_domain;
 	u32 tmp;
@@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	u32 tmp, flags = 0;

@@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 	int pipe = crtc->pipe;
@@ -184,8 +184,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
	 * panels behave in the two modes. For now, let's just maintain the
	 * value we got from the BIOS.
	 */
-	 temp &= ~LVDS_A3_POWER_MASK;
-	 temp |= lvds_encoder->a3_power;
+	temp &= ~LVDS_A3_POWER_MASK;
+	temp |= lvds_encoder->a3_power;

 	/* Set the dithering flag on LVDS as needed, note that there is no
	 * special lvds dither control bit on pch-split platforms, dithering is
@@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_connector *intel_connector =
		&lvds_encoder->attached_connector->base;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t ctl_reg, stat_reg;

 	if (HAS_PCH_SPLIT(dev)) {
@@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)

 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
 	POSTING_READ(lvds_encoder->reg);
-	if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+	if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
		DRM_ERROR("timed out waiting for panel to power on\n");

 	intel_panel_enable_backlight(intel_connector);
@@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t ctl_reg, stat_reg;

 	if (HAS_PCH_SPLIT(dev)) {
@@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	}

 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+	if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
		DRM_ERROR("timed out waiting for panel to power off\n");

 	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
		container_of(nb, struct intel_lvds_connector, lid_notifier);
 	struct drm_connector *connector = &lvds_connector->base.base;
 	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
		return NOTIFY_OK;
@@ -555,6 +555,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
 	.atomic_get_property = intel_connector_atomic_get_property,
+	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
 	.destroy = intel_lvds_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
 	{ }	/* terminating entry */
 };

+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
+{
+	struct intel_encoder *intel_encoder;
+
+	for_each_intel_encoder(dev, intel_encoder)
+		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+			return intel_encoder;
+
+	return NULL;
+}
+
 bool intel_is_dual_link_lvds(struct drm_device *dev)
 {
-	struct intel_encoder *encoder;
-	struct intel_lvds_encoder *lvds_encoder;
+	struct intel_encoder *encoder = intel_get_lvds_encoder(dev);

-	for_each_intel_encoder(dev, encoder) {
-		if (encoder->type == INTEL_OUTPUT_LVDS) {
-			lvds_encoder = to_lvds_encoder(&encoder->base);
-
-			return lvds_encoder->is_dual_link;
-		}
-	}
-
-	return false;
+	return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
 }

 static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 {
 	struct drm_device *dev = lvds_encoder->base.base.dev;
 	unsigned int val;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	/* use the module option value if specified */
 	if (i915.lvds_channel_mode > 0)
@@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
 */
 void intel_lvds_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_lvds_encoder *lvds_encoder;
 	struct intel_encoder *intel_encoder;
 	struct intel_lvds_connector *lvds_connector;
@@ -1118,6 +1121,7 @@ out:
 	mutex_unlock(&dev->mode_config.mutex);

 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);

 	lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
 	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
@@ -1130,9 +1134,6 @@ out:
		DRM_DEBUG_KMS("lid notifier registration failed\n");
		lvds_connector->lid_notifier.notifier_call = NULL;
 	}
-	drm_connector_register(connector);
-
-	intel_panel_setup_backlight(connector, INVALID_PIPE);

 	return;

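The intel_get_lvds_encoder() hunk factors a find-first walk out of intel_is_dual_link_lvds(), which then collapses to a single guarded expression. A minimal sketch of that refactor in plain C; the structs and list are stand-ins for the encoder list and to_lvds_encoder():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum output_type { OUTPUT_HDMI, OUTPUT_LVDS };

struct encoder {
	enum output_type type;
	bool is_dual_link;	/* only meaningful for LVDS here */
};

/* Find-first helper: the walk lives in one place... */
static struct encoder *get_lvds_encoder(struct encoder *list, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (list[i].type == OUTPUT_LVDS)
			return &list[i];
	return NULL;
}

/* ...so the query becomes one expression, as in the diff. */
static bool is_dual_link_lvds(struct encoder *list, size_t n)
{
	struct encoder *enc = get_lvds_encoder(list, n);

	return enc && enc->is_dual_link;
}

int main(void)
{
	struct encoder encoders[] = {
		{ OUTPUT_HDMI, false },
		{ OUTPUT_LVDS, true },
	};

	printf("%d\n", is_dual_link_lvds(encoders, 2));	/* prints 1 */
	return 0;
}
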
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -82,7 +82,7 @@ void
 intel_attach_force_audio_property(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_property *prop;

 	prop = dev_priv->force_audio_property;
@@ -109,7 +109,7 @@ void
 intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_property *prop;

 	prop = dev_priv->broadcast_rgb_property;
@ -232,11 +232,28 @@ struct opregion_asle_ext {
|
|||
#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
|
||||
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
|
||||
|
||||
#define ACPI_OTHER_OUTPUT (0<<8)
|
||||
#define ACPI_VGA_OUTPUT (1<<8)
|
||||
#define ACPI_TV_OUTPUT (2<<8)
|
||||
#define ACPI_DIGITAL_OUTPUT (3<<8)
|
||||
#define ACPI_LVDS_OUTPUT (4<<8)
|
||||
/*
|
||||
* ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
|
||||
* Attached to the Display Adapter).
|
||||
*/
|
||||
#define ACPI_DISPLAY_INDEX_SHIFT 0
|
||||
#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
|
||||
#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
|
||||
#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
|
||||
#define ACPI_DISPLAY_TYPE_SHIFT 8
|
||||
#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
|
||||
#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
|
||||
#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
|
||||
#define ACPI_DISPLAY_TYPE_TV (2 << 8)
|
||||
#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
|
||||
#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
|
||||
#define ACPI_VENDOR_SPECIFIC_SHIFT 12
|
||||
#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
|
||||
#define ACPI_BIOS_CAN_DETECT (1 << 16)
|
||||
#define ACPI_DEPENDS_ON_VGA (1 << 17)
|
||||
#define ACPI_PIPE_ID_SHIFT 18
|
||||
#define ACPI_PIPE_ID_MASK (7 << 18)
|
||||
#define ACPI_DEVICE_ID_SCHEME (1 << 31)
|
||||
|
||||
#define MAX_DSLP 1500
|
||||
|
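The old ACPI_*_OUTPUT values only named the type field; the replacement defines cover the whole _DOD device-ID word. A standalone C sketch of decoding that word, reusing the masks above (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

/* Masks copied from the defines above (ACPI 5.0, Appendix B.3.2 _DOD). */
#define ACPI_DISPLAY_INDEX_MASK			(0xfu << 0)
#define ACPI_DISPLAY_TYPE_MASK			(0xfu << 8)
#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL	(4u << 8)
#define ACPI_BIOS_CAN_DETECT			(1u << 16)
#define ACPI_DEVICE_ID_SCHEME			(1u << 31)

int main(void)
{
        /* A made-up _DOD entry: index 2, internal digital, detectable. */
        uint32_t did = ACPI_DEVICE_ID_SCHEME |
                       ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL |
                       ACPI_BIOS_CAN_DETECT | 2;

        printf("index=%u type=%u detectable=%d scheme=%d\n",
               did & ACPI_DISPLAY_INDEX_MASK,
               (did & ACPI_DISPLAY_TYPE_MASK) >> 8,
               !!(did & ACPI_BIOS_CAN_DETECT),
               !!(did & ACPI_DEVICE_ID_SCHEME));
        return 0;
}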

@@ -244,7 +261,7 @@ static int swsci(struct drm_i915_private *dev_priv,
 		 u32 function, u32 parm, u32 *parm_out)
 {
 	struct opregion_swsci *swsci = dev_priv->opregion.swsci;
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u32 main_function, sub_function, scic;
 	u16 swsci_val;
 	u32 dslp;

@@ -366,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 		type = DISPLAY_TYPE_CRT;
 		break;
 	case INTEL_OUTPUT_UNKNOWN:
-	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_HDMI:
 	case INTEL_OUTPUT_DP_MST:
 		type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;

@@ -418,7 +435,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 {
 	struct intel_connector *connector;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);

@@ -657,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
 	}
 }
 
+static u32 acpi_display_type(struct drm_connector *connector)
+{
+	u32 display_type;
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_VGA:
+	case DRM_MODE_CONNECTOR_DVIA:
+		display_type = ACPI_DISPLAY_TYPE_VGA;
+		break;
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_Component:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+	case DRM_MODE_CONNECTOR_TV:
+		display_type = ACPI_DISPLAY_TYPE_TV;
+		break;
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+	case DRM_MODE_CONNECTOR_eDP:
+	case DRM_MODE_CONNECTOR_DSI:
+		display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+		break;
+	case DRM_MODE_CONNECTOR_Unknown:
+	case DRM_MODE_CONNECTOR_VIRTUAL:
+		display_type = ACPI_DISPLAY_TYPE_OTHER;
+		break;
+	default:
+		MISSING_CASE(connector->connector_type);
+		display_type = ACPI_DISPLAY_TYPE_OTHER;
+		break;
+	}
+
+	return display_type;
+}
+
 static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 {
 	struct intel_opregion *opregion = &dev_priv->opregion;
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_connector *connector;
 	acpi_handle handle;
 	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;

@@ -724,37 +782,18 @@ end:
 
 blind_set:
 	i = 0;
-	list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) {
-		int output_type = ACPI_OTHER_OUTPUT;
+	list_for_each_entry(connector,
+			    &dev_priv->drm.mode_config.connector_list, head) {
+		int display_type = acpi_display_type(connector);
+
 		if (i >= max_outputs) {
 			DRM_DEBUG_KMS("More than %u outputs in connector list\n",
 				      max_outputs);
 			return;
 		}
-		switch (connector->connector_type) {
-		case DRM_MODE_CONNECTOR_VGA:
-		case DRM_MODE_CONNECTOR_DVIA:
-			output_type = ACPI_VGA_OUTPUT;
-			break;
-		case DRM_MODE_CONNECTOR_Composite:
-		case DRM_MODE_CONNECTOR_SVIDEO:
-		case DRM_MODE_CONNECTOR_Component:
-		case DRM_MODE_CONNECTOR_9PinDIN:
-			output_type = ACPI_TV_OUTPUT;
-			break;
-		case DRM_MODE_CONNECTOR_DVII:
-		case DRM_MODE_CONNECTOR_DVID:
-		case DRM_MODE_CONNECTOR_DisplayPort:
-		case DRM_MODE_CONNECTOR_HDMIA:
-		case DRM_MODE_CONNECTOR_HDMIB:
-			output_type = ACPI_DIGITAL_OUTPUT;
-			break;
-		case DRM_MODE_CONNECTOR_LVDS:
-			output_type = ACPI_LVDS_OUTPUT;
-			break;
-		}
+
 		temp = get_did(opregion, i);
-		set_did(opregion, i, temp | (1 << 31) | output_type | i);
+		set_did(opregion, i, temp | (1 << 31) | display_type | i);
 		i++;
 	}
 	goto end;

@@ -916,7 +955,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
 int intel_opregion_setup(struct drm_i915_private *dev_priv)
 {
 	struct intel_opregion *opregion = &dev_priv->opregion;
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u32 asls, mboxes;
 	char buf[sizeof(OPREGION_SIGNATURE)];
 	int err = 0;
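In the blind_set path above, each connector's DID word is built as temp | (1 << 31) | display_type | i: bit 31 is ACPI_DEVICE_ID_SCHEME, the type sits in bits 11:8, and the index in the low nibble. A small sketch of that composition — make_did() is a hypothetical helper for illustration, not a kernel function:

#include <stdint.h>
#include <stdio.h>

#define ACPI_DISPLAY_TYPE_TV (2u << 8)

/* Mirrors the blind_set path: bit 31 flags the new ID scheme, the
 * display type lands in bits 11:8, the index in the low bits. */
static uint32_t make_did(uint32_t temp, uint32_t display_type, uint32_t i)
{
        return temp | (1u << 31) | display_type | i;
}

int main(void)
{
        printf("0x%08x\n", make_did(0, ACPI_DISPLAY_TYPE_TV, 3));
        return 0;
}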

@@ -409,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	/* Only wait if there is actually an old frame to release to
 	 * guarantee forward progress.

@@ -741,8 +741,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	u32 swidth, swidthsw, sheight, ostride;
 	enum pipe pipe = overlay->crtc->pipe;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
 	ret = intel_overlay_release_old_vid(overlay);
 	if (ret != 0)

@@ -836,7 +836,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	overlay->old_vid_bo = overlay->vid_bo;
 	overlay->vid_bo = new_bo;
 
-	intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
+	intel_frontbuffer_flip(&dev_priv->drm,
+			       INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	return 0;

@@ -851,8 +852,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 	struct overlay_registers __iomem *regs;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
 	ret = intel_overlay_recover_from_interrupt(overlay);
 	if (ret != 0)

@@ -1084,7 +1085,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv)
 {
 	struct drm_intel_overlay_put_image *put_image_rec = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_overlay *overlay;
 	struct drm_crtc *drmmode_crtc;
 	struct intel_crtc *crtc;

@@ -1282,7 +1283,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
 	struct drm_intel_overlay_attrs *attrs = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_overlay *overlay;
 	struct overlay_registers __iomem *regs;
 	int ret;

@@ -1379,7 +1380,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	if (!overlay)
 		return;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	if (WARN_ON(dev_priv->overlay))
 		goto out_free;

@@ -1387,9 +1388,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
 	reg_bo = NULL;
 	if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-		reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
+		reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
+						       PAGE_SIZE);
 	if (reg_bo == NULL)
-		reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
+		reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
 	if (IS_ERR(reg_bo))
 		goto out_free;
 	overlay->reg_bo = reg_bo;

@@ -1434,7 +1436,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	intel_overlay_unmap_regs(overlay, regs);
 
 	dev_priv->overlay = overlay;
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 	DRM_INFO("initialized overlay support\n");
 	return;

@@ -1444,7 +1446,7 @@ out_unpin_bo:
 out_free_bo:
 	drm_gem_object_unreference(&reg_bo->base);
 out_free:
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 	kfree(overlay);
 	return;
 }
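The overlay hunks above keep asserting one invariant while swapping in the embedded drm member: the caller must hold struct_mutex. A userspace approximation of lockdep_assert_held() — a sketch only; kernel lockdep tracks far more than an owner field:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Remember the owner when locking and assert on it in functions that
 * require the lock to be held by the caller. */
struct checked_mutex {
        pthread_mutex_t lock;
        pthread_t owner;
        bool held;
};

static void cm_lock(struct checked_mutex *m)
{
        pthread_mutex_lock(&m->lock);
        m->owner = pthread_self();
        m->held = true;
}

static void cm_unlock(struct checked_mutex *m)
{
        m->held = false;
        pthread_mutex_unlock(&m->lock);
}

static void cm_assert_held(struct checked_mutex *m)
{
        assert(m->held && pthread_equal(m->owner, pthread_self()));
}

static struct checked_mutex struct_mutex = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
};

static void release_old_frame(void)
{
        cm_assert_held(&struct_mutex);  /* caller must hold the lock */
}

int main(void)
{
        cm_lock(&struct_mutex);
        release_old_frame();
        cm_unlock(&struct_mutex);
        return 0;
}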

@@ -377,7 +377,7 @@ out:
 enum drm_connector_status
 intel_panel_detect(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Assume that the BIOS does not lie through the OpRegion... */
 	if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {

@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
 	if (panel->backlight.combination_mode) {
 		u8 lbpc;
 
-		pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
+		pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
 		val *= lbpc;
 	}

@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
 
 		lbpc = level * 0xfe / panel->backlight.max + 1;
 		level /= lbpc;
-		pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
+		pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
 	}
 
 	if (IS_GEN4(dev_priv)) {

@@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
 	 * backlight. This will leave the backlight on unnecessarily when
 	 * another client is not activated.
 	 */
-	if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+	if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
 		DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
 		return;
 	}

@@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
 {
 	struct intel_connector *connector = bl_get_data(bd);
 	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 hw_level;
 	int ret;

@@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
 	.get_brightness = intel_backlight_device_get_brightness,
 };
 
-static int intel_backlight_device_register(struct intel_connector *connector)
+int intel_backlight_device_register(struct intel_connector *connector)
 {
 	struct intel_panel *panel = &connector->panel;
 	struct backlight_properties props;

@@ -1225,11 +1225,6 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
 		panel->backlight.device = NULL;
 	}
 }
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
-{
-	return 0;
-}
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
 /*

@@ -1321,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int clock;
 
 	if (IS_G4X(dev_priv))

@@ -1736,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 		panel->backlight.set = bxt_set_backlight;
 		panel->backlight.get = bxt_get_backlight;
 		panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
-	} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+	} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+		   HAS_PCH_KBP(dev_priv)) {
 		panel->backlight.setup = lpt_setup_backlight;
 		panel->backlight.enable = lpt_enable_backlight;
 		panel->backlight.disable = lpt_disable_backlight;

@@ -1809,11 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel)
 		drm_mode_destroy(intel_connector->base.dev,
 				 panel->downclock_mode);
 }
-
-void intel_backlight_register(struct drm_device *dev)
-{
-	struct intel_connector *connector;
-
-	for_each_intel_connector(dev, connector)
-		intel_backlight_device_register(connector);
-}
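For context on the combination-mode hunks above: the requested brightness is factored into a coarse LBPC byte written to PCI config space and a fine PWM duty cycle, and the read path multiplies them back together. A standalone sketch of that split with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Mirrors i9xx_set_backlight()'s combination-mode math; the values
 * here are illustrative, not real hardware limits. */
int main(void)
{
        uint32_t max = 0x1000;          /* assumed backlight.max */
        uint32_t level = 0x0800;        /* requested level */

        uint8_t lbpc = level * 0xfe / max + 1;  /* coarse factor */
        uint32_t pwm = level / lbpc;            /* fine duty cycle */

        /* i9xx_get_backlight() reconstructs by multiplying again */
        printf("lbpc=%u pwm=%u reconstructed=%u\n", lbpc, pwm, pwm * lbpc);
        return 0;
}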

@@ -57,7 +57,7 @@
 
 static void gen9_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
 	I915_WRITE(CHICKEN_PAR1_1,

@@ -83,7 +83,7 @@ static void gen9_init_clock_gating(struct drm_device *dev)
 
 static void bxt_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	gen9_init_clock_gating(dev);
 

@@ -109,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp;
 
 	tmp = I915_READ(CLKCFG);

@@ -148,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
 
 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 ddrpll, csipll;
 
 	ddrpll = I915_READ16(DDRMPLL1);

@@ -319,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 
 void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	u32 val;
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {

@@ -375,7 +375,7 @@ static const int pessimal_latency_ns = 5000;
 static int vlv_get_fifo_size(struct drm_device *dev,
 			     enum pipe pipe, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int sprite0_start, sprite1_start, size;
 
 	switch (pipe) {

@@ -426,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev,
 
 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 

@@ -442,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i830_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 

@@ -459,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i845_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 

@@ -637,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
 	u32 reg;

@@ -934,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 
 static void vlv_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* all latencies in usec */
 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

@@ -1325,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev,
 static void vlv_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct vlv_wm_values wm = {};

@@ -1381,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;

@@ -1438,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	int srwm = 1;
 	int cursor_sr = 16;

@@ -1512,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
 	uint32_t fwater_hi;

@@ -1642,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 static void i845_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	const struct drm_display_mode *adjusted_mode;
 	uint32_t fwater_lo;

@@ -2070,7 +2070,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 
 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (IS_GEN9(dev)) {
 		uint32_t val;

@@ -2236,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 				    uint16_t wm[5], uint16_t min)
 {
-	int level, max_level = ilk_wm_max_level(dev_priv->dev);
+	int level, max_level = ilk_wm_max_level(&dev_priv->drm);
 
 	if (wm[0] >= min)
 		return false;

@@ -2250,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 
 static void snb_wm_latency_quirk(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool changed;
 
 	/*

@@ -2272,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
 
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
 

@@ -2294,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 
 static void skl_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
 	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);

@@ -2330,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct intel_pipe_wm *pipe_wm;
 	struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *pristate = NULL;
 	struct intel_plane_state *sprstate = NULL;

@@ -2505,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 			 const struct ilk_wm_maximums *max,
 			 struct intel_pipe_wm *merged)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int level, max_level = ilk_wm_max_level(dev);
 	int last_enabled_level = max_level;
 

@@ -2565,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
 /* The value we need to program into the WM_LPx latency field */
 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		return 2 * level;

@@ -2765,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 				struct ilk_wm_values *results)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
 	unsigned int dirty;
 	uint32_t val;

@@ -2840,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 
 bool ilk_disable_lp_wm(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }

@@ -3498,7 +3498,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 		     int level,
 		     struct skl_wm_level *result)
 {
-	struct drm_device *dev = dev_priv->dev;
 	struct drm_atomic_state *state = cstate->base.state;
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_plane *plane;

@@ -3514,7 +3513,9 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 	 */
 	memset(result, 0, sizeof(*result));
 
-	for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
+	for_each_intel_plane_mask(&dev_priv->drm,
+				  intel_plane,
+				  cstate->base.plane_mask) {
 		int i = skl_wm_plane_id(intel_plane);
 
 		plane = &intel_plane->base;

@@ -3595,7 +3596,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 			     struct skl_pipe_wm *pipe_wm)
 {
 	struct drm_device *dev = cstate->base.crtc->dev;
-	const struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
 	int level, max_level = ilk_wm_max_level(dev);
 	int ret;
 

@@ -3682,7 +3683,7 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
 				const struct skl_wm_values *new)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 
 	for_each_intel_crtc(dev, crtc) {

@@ -3779,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
 				struct skl_wm_values *new_values)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct skl_ddb_allocation *cur_ddb, *new_ddb;
 	bool reallocated[I915_MAX_PIPES] = {};
 	struct intel_crtc *crtc;

@@ -3879,6 +3880,19 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
 	return 0;
 }
 
+static uint32_t
+pipes_modified(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *cstate;
+	uint32_t i, ret = 0;
+
+	for_each_crtc_in_state(state, crtc, cstate, i)
+		ret |= drm_crtc_mask(crtc);
+
+	return ret;
+}
+
 static int
 skl_compute_ddb(struct drm_atomic_state *state)
 {

@@ -3887,7 +3901,7 @@
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct intel_crtc *intel_crtc;
 	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
-	unsigned realloc_pipes = dev_priv->active_crtcs;
+	uint32_t realloc_pipes = pipes_modified(state);
 	int ret;
 
 	/*
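pipes_modified() collects a bitmask of the CRTCs touched by the atomic state so skl_compute_ddb() can reallocate DDB space only for those pipes rather than for every active CRTC. A minimal userspace sketch of the same bookkeeping — plain indices stand in for real CRTC objects:

#include <stdint.h>
#include <stdio.h>

/* drm_crtc_mask() is the same one-bit-per-CRTC idea. */
static uint32_t crtc_mask(unsigned int index)
{
        return 1u << index;
}

int main(void)
{
        unsigned int crtcs_in_state[] = { 0, 2 };  /* pipes A and C touched */
        uint32_t mask = 0;

        for (unsigned int i = 0; i < 2; i++)
                mask |= crtc_mask(crtcs_in_state[i]);

        /* DDB reallocation can now be limited to these pipes, which is
         * exactly what the realloc_pipes change above does. */
        printf("realloc_pipes = 0x%x\n", mask);
        return 0;
}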

@@ -4002,7 +4016,7 @@ static void skl_update_wm(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;

@@ -4043,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
 
 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct ilk_wm_maximums max;
 	struct intel_wm_config config = {};

@@ -4145,7 +4159,7 @@ static void skl_pipe_wm_active_state(uint32_t val,
 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);

@@ -4199,7 +4213,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 
 void skl_wm_get_hw_state(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct drm_crtc *crtc;
 

@@ -4219,7 +4233,7 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);

@@ -4423,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct drm_crtc *crtc;
 

@@ -4485,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
  */
 void intel_update_watermarks(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 
 	if (dev_priv->display.update_wm)
 		dev_priv->display.update_wm(crtc);

@@ -4654,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
-		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+		if (val > dev_priv->rps.efficient_freq + 1 &&
+		    val > dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 
 	case BETWEEN:
-		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+		if (val <= dev_priv->rps.efficient_freq &&
+		    val < dev_priv->rps.cur_freq)
 			new_power = LOW_POWER;
-		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+		else if (val >= dev_priv->rps.rp0_freq &&
+			 val > dev_priv->rps.cur_freq)
 			new_power = HIGH_POWER;
 		break;
 
 	case HIGH_POWER:
-		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
+		    val < dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 	}

@@ -4712,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 	}
 
 	I915_WRITE(GEN6_RP_UP_EI,
-		GT_INTERVAL_FROM_US(dev_priv, ei_up));
+		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
 	I915_WRITE(GEN6_RP_UP_THRESHOLD,
-		GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+		   GT_INTERVAL_FROM_US(dev_priv,
+				       ei_up * threshold_up / 100));
 
 	I915_WRITE(GEN6_RP_DOWN_EI,
-		GT_INTERVAL_FROM_US(dev_priv, ei_down));
+		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-		GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+		   GT_INTERVAL_FROM_US(dev_priv,
+				       ei_down * threshold_down / 100));
 
-	 I915_WRITE(GEN6_RP_CONTROL,
-		    GEN6_RP_MEDIA_TURBO |
-		    GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		    GEN6_RP_MEDIA_IS_GFX |
-		    GEN6_RP_ENABLE |
-		    GEN6_RP_UP_BUSY_AVG |
-		    GEN6_RP_DOWN_IDLE_AVG);
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
 
 	dev_priv->rps.power = new_power;
 	dev_priv->rps.up_threshold = threshold_up;
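The threshold writes above program GEN6_RP_UP/DOWN_THRESHOLD as a percentage of the evaluation interval. The arithmetic, with illustrative numbers (GT_INTERVAL_FROM_US itself converts to hardware ticks and is omitted here):

#include <stdio.h>

/* Mirrors ei_up * threshold_up / 100 from the hunk above. */
int main(void)
{
        unsigned int ei_up = 10000;      /* evaluation interval, us (assumed) */
        unsigned int threshold_up = 85;  /* percent busy needed to upclock */

        unsigned int up_threshold_us = ei_up * threshold_up / 100;

        printf("upclock if busy for %u of %u us\n", up_threshold_us, ei_up);
        return 0;
}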

@@ -4844,12 +4864,27 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 		gen6_rps_reset_ei(dev_priv);
 		I915_WRITE(GEN6_PMINTRMSK,
 			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+
+		gen6_enable_rps_interrupts(dev_priv);
+
+		/* Ensure we start at the user's desired frequency */
+		intel_set_rps(dev_priv,
+			      clamp(dev_priv->rps.cur_freq,
+				    dev_priv->rps.min_freq_softlimit,
+				    dev_priv->rps.max_freq_softlimit));
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+	/* Flush our bottom-half so that it does not race with us
+	 * setting the idle frequency and so that it is bounded by
+	 * our rpm wakeref. And then disable the interrupts to stop any
+	 * further RPS reclocking whilst we are asleep.
+	 */
+	gen6_disable_rps_interrupts(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))

@@ -4874,7 +4909,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 	/* This is intentionally racy! We peek at the state here, then
 	 * validate inside the RPS worker.
 	 */
-	if (!(dev_priv->mm.busy &&
+	if (!(dev_priv->gt.awake &&
 	      dev_priv->rps.enabled &&
 	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
 		return;

@@ -4890,7 +4925,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->rps.interrupts_enabled) {
 		dev_priv->rps.client_boost = true;
-		queue_work(dev_priv->wq, &dev_priv->rps.work);
+		schedule_work(&dev_priv->rps.work);
 	}
 	spin_unlock_irq(&dev_priv->irq_lock);

@@ -4954,14 +4989,15 @@ static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
 		mode = 0;
 	}
 	if (HAS_RC6p(dev_priv))
-		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
-			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
-			      onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
-			      onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
+		DRM_DEBUG_DRIVER("Enabling RC6 states: "
+				 "RC6 %s RC6p %s RC6pp %s\n",
+				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+				 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+				 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
 	else
-		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
-			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
+		DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
+				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
 }
 
 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)

@@ -4969,9 +5005,20 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool enable_rc6 = true;
 	unsigned long rc6_ctx_base;
+	u32 rc_ctl;
+	int rc_sw_target;
+
+	rc_ctl = I915_READ(GEN6_RC_CONTROL);
+	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
+		       RC_SW_TARGET_STATE_SHIFT;
+	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+			 rc_sw_target);
 
 	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
-		DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
 		enable_rc6 = false;
 	}

@@ -4983,7 +5030,7 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
 	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
 					ggtt->stolen_reserved_size))) {
-		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
 		enable_rc6 = false;
 	}

@@ -4991,15 +5038,24 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
 	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
 	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
-		DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
 		enable_rc6 = false;
 	}
 
-	if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
-					    GEN6_RC_CTL_HW_ENABLE)) &&
-	    ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
-	     !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
-		DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
+	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
+	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
+		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!I915_READ(GEN6_GFXPAUSE)) {
+		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!I915_READ(GEN8_MISC_CTRL0)) {
+		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+		enable_rc6 = false;
+	}
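gen6_rps_busy() above now re-applies the user's soft limits when leaving idle via clamp(). The same three-way pick in a standalone sketch (the frequencies are illustrative, not real RPS codes):

#include <stdio.h>

/* The kernel's clamp(val, lo, hi) reduces to this for scalars. */
static unsigned int clamp_freq(unsigned int val, unsigned int lo,
                               unsigned int hi)
{
        return val < lo ? lo : val > hi ? hi : val;
}

int main(void)
{
        unsigned int cur_freq = 300;             /* below the floor */
        unsigned int min_soft = 350, max_soft = 1100;

        /* leaving idle: snap back into [min, max] softlimits */
        printf("start at %u\n", clamp_freq(cur_freq, min_soft, max_soft));
        return 0;
}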

@@ -5031,8 +5087,9 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
 		mask = INTEL_RC6_ENABLE;
 
 	if ((enable_rc6 & mask) != enable_rc6)
-		DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
-			      enable_rc6 & mask, enable_rc6, mask);
+		DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
+				 "(requested %d, valid %d)\n",
+				 enable_rc6 & mask, enable_rc6, mask);
 
 	return enable_rc6 & mask;
 }

@@ -5643,7 +5700,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 	u32 pcbr;
 	int pctx_size = 24*1024;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 
 	pcbr = I915_READ(VLV_PCBR);
 	if (pcbr) {

@@ -5651,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 		int pcbr_offset;
 
 		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
-		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+		pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);

@@ -5668,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 	 * overlap with other ranges, such as the frame buffer, protected
 	 * memory, or any other relevant ranges.
 	 */
-	pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
+	pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
 	if (!pctx) {
 		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
 		goto out;

@@ -5680,7 +5737,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 out:
 	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 	dev_priv->vlv_pctx = pctx;
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)

@@ -6624,9 +6681,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 
 	if (IS_IRONLAKE_M(dev_priv)) {
 		ironlake_enable_drps(dev_priv);
-		mutex_lock(&dev_priv->dev->struct_mutex);
+		mutex_lock(&dev_priv->drm.struct_mutex);
 		intel_init_emon(dev_priv);
-		mutex_unlock(&dev_priv->dev->struct_mutex);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
 		/*
 		 * PCU communication is slow and this doesn't need to be

@@ -6657,7 +6714,7 @@ void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
 
 static void ibx_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/*
 	 * On Ibex Peak and Cougar Point, we need to disable clock

@@ -6669,7 +6726,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
 
 static void g4x_disable_trickle_feed(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {

@@ -6684,7 +6741,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
 
 static void ilk_init_lp_watermarks(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
 	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);

@@ -6698,7 +6755,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
 
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
 	/*

@@ -6772,7 +6829,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 
 static void cpt_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;
 	uint32_t val;
 

@@ -6809,7 +6866,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 
 static void gen6_check_mch_setup(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp;
 
 	tmp = I915_READ(MCH_SSKPD);

@@ -6820,7 +6877,7 @@ static void gen6_check_mch_setup(struct drm_device *dev)
 
 static void gen6_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

@@ -6935,7 +6992,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 
 static void lpt_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/*
 	 * TODO: this bit should only be enabled when really needed, then

@@ -6954,7 +7011,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
 
 static void lpt_suspend_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (HAS_PCH_LPT_LP(dev)) {
 		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

@@ -6989,7 +7046,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 
 static void kabylake_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	gen9_init_clock_gating(dev);
 

@@ -7010,7 +7067,7 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
 
 static void skylake_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	gen9_init_clock_gating(dev);
 

@@ -7025,7 +7082,7 @@ static void skylake_init_clock_gating(struct drm_device *dev)
 
 static void broadwell_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	ilk_init_lp_watermarks(dev);

@@ -7076,7 +7133,7 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
 
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	ilk_init_lp_watermarks(dev);
 

@@ -7132,7 +7189,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t snpcr;
 
 	ilk_init_lp_watermarks(dev);

@@ -7230,7 +7287,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* WaDisableEarlyCull:vlv */
 	I915_WRITE(_3D_CHICKEN3,

@@ -7312,7 +7369,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 
 static void cherryview_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* WaVSRefCountFullforceMissDisable:chv */
 	/* WaDSRefCountFullforceMissDisable:chv */

@@ -7348,7 +7405,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
 
 static void g4x_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate;
 
 	I915_WRITE(RENCLK_GATE_D1, 0);

@@ -7375,7 +7432,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 
 static void crestline_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
 	I915_WRITE(RENCLK_GATE_D2, 0);

@@ -7391,7 +7448,7 @@ static void crestline_init_clock_gating(struct drm_device *dev)
 
 static void broadwater_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
 		   I965_RCC_CLOCK_GATE_DISABLE |

@@ -7408,7 +7465,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
 
 static void gen3_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dstate = I915_READ(D_STATE);
 
 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |

@@ -7433,7 +7490,7 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 
 static void i85x_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 

@@ -7447,7 +7504,7 @@ static void i85x_init_clock_gating(struct drm_device *dev)
 
 static void i830_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 

@@ -7458,7 +7515,7 @@ static void i830_init_clock_gating(struct drm_device *dev)
 
 void intel_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	dev_priv->display.init_clock_gating(dev);
 }

@@ -7526,7 +7583,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_fbc_init(dev_priv);

@@ -7604,46 +7661,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+	/* GEN6_PCODE_* are outside of the forcewake domain, we can
+	 * use the fw I915_READ variants to reduce the amount of work
+	 * required when reading/writing.
+	 */
+
+	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
 		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
 		return -EAGAIN;
 	}
 
-	I915_WRITE(GEN6_PCODE_DATA, *val);
-	I915_WRITE(GEN6_PCODE_DATA1, 0);
-	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
+	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
+	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		     500)) {
+	if (intel_wait_for_register_fw(dev_priv,
+				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+				       500)) {
 		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
 		return -ETIMEDOUT;
 	}
 
-	*val = I915_READ(GEN6_PCODE_DATA);
-	I915_WRITE(GEN6_PCODE_DATA, 0);
+	*val = I915_READ_FW(GEN6_PCODE_DATA);
+	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
 	return 0;
 }
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
+			    u32 mbox, u32 val)
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+	/* GEN6_PCODE_* are outside of the forcewake domain, we can
+	 * use the fw I915_READ variants to reduce the amount of work
+	 * required when reading/writing.
+	 */
+
+	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
 		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
 		return -EAGAIN;
 	}
 
-	I915_WRITE(GEN6_PCODE_DATA, val);
-	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+	I915_WRITE_FW(GEN6_PCODE_DATA, val);
+	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		     500)) {
+	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+				       500)) {
 		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
 		return -ETIMEDOUT;
 	}
 
-	I915_WRITE(GEN6_PCODE_DATA, 0);
+	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
 	return 0;
 }
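Both pcode helpers above now poll GEN6_PCODE_MAILBOX through the new intel_wait_for_register_fw() helper instead of an open-coded wait_for(). The underlying pattern is "poll (reg & mask) == value until a timeout"; a userspace sketch against a fake register — the register behaviour and names here are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_mailbox = 0x80000000;  /* busy bit set initially */

static uint32_t read_reg(void)
{
        fake_mailbox = 0;  /* pretend the firmware acked immediately */
        return fake_mailbox;
}

static bool wait_for_register(uint32_t mask, uint32_t value,
                              unsigned int timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if ((read_reg() & mask) == value)
                        return true;  /* condition met */
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 >
                    (long)timeout_ms)
                        return false;  /* -ETIMEDOUT in the kernel */
        }
}

int main(void)
{
        printf("ready: %d\n", wait_for_register(0x80000000, 0, 500));
        return 0;
}

The kernel helper additionally mixes a busy-spin phase with sleeping waits; this sketch only shows the poll-with-deadline core.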

@@ -7713,7 +7783,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	struct request_boost *boost = container_of(work, struct request_boost, work);
 	struct drm_i915_gem_request *req = boost->req;
 
-	if (!i915_gem_request_completed(req, true))
+	if (!i915_gem_request_completed(req))
 		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
 	i915_gem_request_unreference(req);

@@ -7727,7 +7797,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
 	if (req == NULL || INTEL_GEN(req->i915) < 6)
 		return;
 
-	if (i915_gem_request_completed(req, true))
+	if (i915_gem_request_completed(req))
 		return;
 
 	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);

@@ -7743,7 +7813,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
 
 void intel_pm_setup(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	mutex_init(&dev_priv->rps.hw_lock);
 	spin_lock_init(&dev_priv->rps.client_lock);

@@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
 
 static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t val;
 
 	val = I915_READ(VLV_PSRSTAT(pipe)) &

@@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);

@@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	uint32_t val;

@@ -173,7 +173,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t aux_clock_divider;
 	i915_reg_t aux_ctl_reg;
 	static const uint8_t aux_msg[] = {

@@ -220,7 +220,7 @@ static void vlv_psr_enable_source(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc = dig_port->base.base.crtc;
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 

@@ -235,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc = dig_port->base.base.crtc;
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 

@@ -252,7 +252,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	uint32_t max_sleep_time = 0x1f;
 	/* Lately it was identified that depending on panel idle frame count

@@ -324,7 +324,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc = dig_port->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 

@@ -378,7 +378,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	WARN_ON(dev_priv->psr.active);

@@ -407,7 +407,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
 
 	if (!HAS_PSR(dev)) {
||||
|
@ -494,15 +494,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp)
|
|||
{
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(intel_dig_port->base.base.crtc);
|
||||
uint32_t val;
|
||||
|
||||
if (dev_priv->psr.active) {
|
||||
/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
|
||||
if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
|
||||
VLV_EDP_PSR_IN_TRANS) == 0, 1))
|
||||
if (intel_wait_for_register(dev_priv,
|
||||
VLV_PSRSTAT(intel_crtc->pipe),
|
||||
VLV_EDP_PSR_IN_TRANS,
|
||||
0,
|
||||
1))
|
||||
WARN(1, "PSR transition took longer than expected\n");
|
||||
|
||||
val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
|
||||
|
@ -521,16 +524,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
|
|||
{
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
if (dev_priv->psr.active) {
|
||||
I915_WRITE(EDP_PSR_CTL,
|
||||
I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
|
||||
|
||||
/* Wait till PSR is idle */
|
||||
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
|
||||
EDP_PSR_STATUS_STATE_MASK) == 0,
|
||||
2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
|
||||
if (intel_wait_for_register(dev_priv,
|
||||
EDP_PSR_STATUS_CTL,
|
||||
EDP_PSR_STATUS_STATE_MASK,
|
||||
0,
|
||||
2000))
|
||||
DRM_ERROR("Timed out waiting for PSR Idle State\n");
|
||||
|
||||
dev_priv->psr.active = false;
|
||||
|
@ -549,7 +554,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
|
|||
{
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
mutex_lock(&dev_priv->psr.lock);
|
||||
if (!dev_priv->psr.enabled) {
|
||||
|
@ -586,14 +591,20 @@ static void intel_psr_work(struct work_struct *work)
|
|||
* and be ready for re-enable.
|
||||
*/
|
||||
if (HAS_DDI(dev_priv)) {
|
||||
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
|
||||
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
|
||||
if (intel_wait_for_register(dev_priv,
|
||||
EDP_PSR_STATUS_CTL,
|
||||
EDP_PSR_STATUS_STATE_MASK,
|
||||
0,
|
||||
50)) {
|
||||
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
|
||||
VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
|
||||
if (intel_wait_for_register(dev_priv,
|
||||
VLV_PSRSTAT(pipe),
|
||||
VLV_EDP_PSR_IN_TRANS,
|
||||
0,
|
||||
1)) {
|
||||
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
|
||||
return;
|
||||
}
|
||||
|
@ -619,7 +630,7 @@ unlock:
|
|||
|
||||
static void intel_psr_exit(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_dp *intel_dp = dev_priv->psr.enabled;
|
||||
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
|
||||
enum pipe pipe = to_intel_crtc(crtc)->pipe;
|
||||
|
@ -674,7 +685,7 @@ static void intel_psr_exit(struct drm_device *dev)
|
|||
void intel_psr_single_frame_update(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_crtc *crtc;
|
||||
enum pipe pipe;
|
||||
u32 val;
|
||||
|
@ -722,7 +733,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
|
|||
void intel_psr_invalidate(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_crtc *crtc;
|
||||
enum pipe pipe;
|
||||
|
||||
|
@ -760,7 +771,7 @@ void intel_psr_invalidate(struct drm_device *dev,
|
|||
void intel_psr_flush(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits, enum fb_op_origin origin)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_crtc *crtc;
|
||||
enum pipe pipe;
|
||||
|
||||
|
@ -796,7 +807,7 @@ void intel_psr_flush(struct drm_device *dev,
|
|||
*/
|
||||
void intel_psr_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
|
||||
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
|
||||
|
|
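[Review note: each wait conversion above preserves the original (register,
mask, value, timeout) tuple. One subtlety in hsw_psr_disable(): the old
_wait_for() took its timeout in microseconds (2 * USEC_PER_SEC, polling
every 10 ms), whereas intel_wait_for_register() takes milliseconds, so the
same two-second budget is now spelled 2000. For reference, a sketch of the
equivalent open-coded form (wait_for() takes its timeout in ms):]

        /* Equivalent to the new hsw_psr_disable() wait, shown open-coded. */
        if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
                      EDP_PSR_STATUS_STATE_MASK) == 0, 2000))
                DRM_ERROR("Timed out waiting for PSR Idle State\n");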
[file diff suppressed because it is too large]
@@ -62,18 +62,6 @@ struct intel_hw_status_page {
         (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
         GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-#define GEN8_RING_SEMAPHORE_INIT(e) do { \
-        if (!dev_priv->semaphore_obj) { \
-                break; \
-        } \
-        (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
-        (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
-        (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
-        (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
-        (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
-        (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
-} while(0)
-
 enum intel_ring_hangcheck_action {
         HANGCHECK_IDLE = 0,
         HANGCHECK_WAIT,
@@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action {
 
 struct intel_ring_hangcheck {
         u64 acthd;
-        unsigned long user_interrupts;
         u32 seqno;
+        unsigned user_interrupts;
         int score;
         enum intel_ring_hangcheck_action action;
         int deadlock;
@@ -141,6 +129,8 @@ struct i915_ctx_workarounds {
         struct drm_i915_gem_object *obj;
 };
 
+struct drm_i915_gem_request;
+
 struct intel_engine_cs {
         struct drm_i915_private *i915;
         const char *name;
@@ -160,6 +150,39 @@
         struct intel_ringbuffer *buffer;
         struct list_head buffers;
 
+        /* Rather than have every client wait upon all user interrupts,
+         * with the herd waking after every interrupt and each doing the
+         * heavyweight seqno dance, we delegate the task (of being the
+         * bottom-half of the user interrupt) to the first client. After
+         * every interrupt, we wake up one client, who does the heavyweight
+         * coherent seqno read and either goes back to sleep (if incomplete),
+         * or wakes up all the completed clients in parallel, before then
+         * transferring the bottom-half status to the next client in the queue.
+         *
+         * Compared to walking the entire list of waiters in a single dedicated
+         * bottom-half, we reduce the latency of the first waiter by avoiding
+         * a context switch, but incur additional coherent seqno reads when
+         * following the chain of request breadcrumbs. Since it is most likely
+         * that we have a single client waiting on each seqno, then reducing
+         * the overhead of waking that client is much preferred.
+         */
+        struct intel_breadcrumbs {
+                struct task_struct *irq_seqno_bh; /* bh for user interrupts */
+                unsigned long irq_wakeups;
+                bool irq_posted;
+
+                spinlock_t lock; /* protects the lists of requests */
+                struct rb_root waiters; /* sorted by retirement, priority */
+                struct rb_root signals; /* sorted by retirement */
+                struct intel_wait *first_wait; /* oldest waiter by retirement */
+                struct task_struct *signaler; /* used for fence signalling */
+                struct drm_i915_gem_request *first_signal;
+                struct timer_list fake_irq; /* used after a missed interrupt */
+
+                bool irq_enabled : 1;
+                bool rpm_wakelock : 1;
+        } breadcrumbs;
+
         /*
          * A pool of objects to use as shadow copies of client batch buffers
          * when the command parser is enabled. Prevents the client from
@@ -170,11 +193,10 @@
         struct intel_hw_status_page status_page;
         struct i915_ctx_workarounds wa_ctx;
 
-        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
-        u32 irq_enable_mask; /* bitmask to enable ring interrupt */
-        struct drm_i915_gem_request *trace_irq_req;
-        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
-        void (*irq_put)(struct intel_engine_cs *ring);
+        u32 irq_keep_mask; /* always keep these interrupts */
+        u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+        void (*irq_enable)(struct intel_engine_cs *ring);
+        void (*irq_disable)(struct intel_engine_cs *ring);
 
         int (*init_hw)(struct intel_engine_cs *ring);
 
@@ -193,9 +215,6 @@
          * monotonic, even if not coherent.
          */
         void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
-        u32 (*get_seqno)(struct intel_engine_cs *ring);
-        void (*set_seqno)(struct intel_engine_cs *ring,
-                          u32 seqno);
         int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                    u64 offset, u32 length,
                                    unsigned dispatch_flags);
@@ -272,7 +291,6 @@
         unsigned int idle_lite_restore_wa;
         bool disable_lite_restore_wa;
         u32 ctx_desc_template;
-        u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
         int (*emit_request)(struct drm_i915_gem_request *request);
         int (*emit_flush)(struct drm_i915_gem_request *request,
                           u32 invalidate_domains,
@@ -304,12 +322,9 @@
          * inspecting request list.
          */
         u32 last_submitted_seqno;
-        unsigned user_interrupts;
 
         bool gpu_caches_dirty;
 
-        wait_queue_head_t irq_queue;
-
         struct i915_gem_context *last_context;
 
         struct intel_ring_hangcheck hangcheck;
@@ -317,7 +332,6 @@
         struct {
                 struct drm_i915_gem_object *obj;
                 u32 gtt_offset;
-                volatile u32 *cpu_page;
         } scratch;
 
         bool needs_cmd_parser;
@@ -348,13 +362,13 @@
 };
 
 static inline bool
-intel_engine_initialized(struct intel_engine_cs *engine)
+intel_engine_initialized(const struct intel_engine_cs *engine)
 {
         return engine->i915 != NULL;
 }
 
 static inline unsigned
-intel_engine_flag(struct intel_engine_cs *engine)
+intel_engine_flag(const struct intel_engine_cs *engine)
 {
         return 1 << engine->id;
 }
@@ -456,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
 }
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-bool intel_engine_stopped(struct intel_engine_cs *engine);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
+int intel_init_pipe_control(struct intel_engine_cs *engine);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -473,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
@@ -495,4 +512,62 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
         return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
 }
 
+/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
+struct intel_wait {
+        struct rb_node node;
+        struct task_struct *tsk;
+        u32 seqno;
+};
+
+struct intel_signal_node {
+        struct rb_node node;
+        struct intel_wait wait;
+};
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+
+static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
+{
+        wait->tsk = current;
+        wait->seqno = seqno;
+}
+
+static inline bool intel_wait_complete(const struct intel_wait *wait)
+{
+        return RB_EMPTY_NODE(&wait->node);
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+                           struct intel_wait *wait);
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+                              struct intel_wait *wait);
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+
+static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+{
+        return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+}
+
+static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+{
+        bool wakeup = false;
+        struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+        /* Note that for this not to dangerously chase a dangling pointer,
+         * the caller is responsible for ensuring that the task remains
+         * valid for wake_up_process() i.e. that the RCU grace period
+         * cannot expire.
+         *
+         * Also note that tsk is likely to be in !TASK_RUNNING state so an
+         * early test for tsk->state != TASK_RUNNING before wake_up_process()
+         * is unlikely to be beneficial.
+         */
+        if (tsk)
+                wakeup = wake_up_process(tsk);
+        return wakeup;
+}
+
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+unsigned int intel_kick_waiters(struct drm_i915_private *i915);
+unsigned int intel_kick_signalers(struct drm_i915_private *i915);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
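[Review note: a rough sketch of how a waiter is expected to drive the
breadcrumbs interface above. The real loop lives in i915_wait_request(),
whose diff is not shown here, so the completion check below is a
hypothetical stand-in and the surrounding shape is illustrative only:]

        struct intel_wait wait;

        intel_wait_init(&wait, req->seqno);
        if (intel_engine_add_wait(req->engine, &wait)) {
                /* We became the interrupt bottom-half: the user interrupt
                 * (or the fake_irq fallback timer) is now armed for us.
                 */
        }

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (request_completed(req)) /* hypothetical helper */
                        break;
                schedule(); /* woken by the irq or the previous bottom-half */
        }
        __set_current_state(TASK_RUNNING);

        /* Removal hands the bottom-half role to the next waiter in the
         * rbtree and wakes any waiters whose seqnos have since completed.
         */
        intel_engine_remove_wait(req->engine, &wait);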
@@ -287,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  */
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
-        struct drm_device *dev = dev_priv->dev;
+        struct drm_device *dev = &dev_priv->drm;
 
         /*
          * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -318,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
 {
-        struct drm_device *dev = dev_priv->dev;
+        struct drm_device *dev = &dev_priv->drm;
 
         /*
          * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -365,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 
         if (!is_enabled) {
                 DRM_DEBUG_KMS("Enabling power well\n");
-                if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-                              HSW_PWR_WELL_STATE_ENABLED), 20))
+                if (intel_wait_for_register(dev_priv,
+                                            HSW_PWR_WELL_DRIVER,
+                                            HSW_PWR_WELL_STATE_ENABLED,
+                                            HSW_PWR_WELL_STATE_ENABLED,
+                                            20))
                         DRM_ERROR("Timeout enabling power well\n");
                 hsw_power_well_post_enable(dev_priv);
         }
@@ -578,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 
         DRM_DEBUG_KMS("Enabling DC9\n");
 
+        intel_power_sequencer_reset(dev_priv);
         gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 }
 
@@ -699,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 
         switch (power_well->data) {
         case SKL_DISP_PW_1:
-                if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                              SKL_FUSE_PG0_DIST_STATUS), 1)) {
+                if (intel_wait_for_register(dev_priv,
+                                            SKL_FUSE_STATUS,
+                                            SKL_FUSE_PG0_DIST_STATUS,
+                                            SKL_FUSE_PG0_DIST_STATUS,
+                                            1)) {
                         DRM_ERROR("PG0 not enabled\n");
                         return;
                 }
@@ -761,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 
         if (check_fuse_status) {
                 if (power_well->data == SKL_DISP_PW_1) {
-                        if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                                      SKL_FUSE_PG1_DIST_STATUS), 1))
+                        if (intel_wait_for_register(dev_priv,
+                                                    SKL_FUSE_STATUS,
+                                                    SKL_FUSE_PG1_DIST_STATUS,
+                                                    SKL_FUSE_PG1_DIST_STATUS,
+                                                    1))
                                 DRM_ERROR("PG1 distributing status timeout\n");
                 } else if (power_well->data == SKL_DISP_PW_2) {
-                        if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                                      SKL_FUSE_PG2_DIST_STATUS), 1))
+                        if (intel_wait_for_register(dev_priv,
+                                                    SKL_FUSE_STATUS,
+                                                    SKL_FUSE_PG2_DIST_STATUS,
+                                                    SKL_FUSE_PG2_DIST_STATUS,
+                                                    1))
                                 DRM_ERROR("PG2 distributing status timeout\n");
                 }
         }
@@ -917,7 +930,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
         WARN_ON(dev_priv->cdclk_freq !=
-                dev_priv->display.get_display_clock_speed(dev_priv->dev));
+                dev_priv->display.get_display_clock_speed(&dev_priv->drm));
 
         gen9_assert_dbuf_enabled(dev_priv);
 
@@ -1075,7 +1088,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
          *
          * CHV DPLL B/C have some issues if VGA mode is enabled.
          */
-        for_each_pipe(dev_priv->dev, pipe) {
+        for_each_pipe(&dev_priv->drm, pipe) {
                 u32 val = I915_READ(DPLL(pipe));
 
                 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1100,7 +1113,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 
         intel_hpd_init(dev_priv);
 
-        i915_redisable_vga_power_on(dev_priv->dev);
+        i915_redisable_vga_power_on(&dev_priv->drm);
 }
 
 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -1110,9 +1123,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
         spin_unlock_irq(&dev_priv->irq_lock);
 
         /* make sure we're done processing display irqs */
-        synchronize_irq(dev_priv->dev->irq);
+        synchronize_irq(dev_priv->drm.irq);
 
-        vlv_power_sequencer_reset(dev_priv);
+        intel_power_sequencer_reset(dev_priv);
 }
 
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1205,7 +1218,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
         u32 phy_control = dev_priv->chv_phy_control;
         u32 phy_status = 0;
         u32 phy_status_mask = 0xffffffff;
-        u32 tmp;
 
         /*
          * The BIOS can leave the PHY in some weird state
@@ -1293,10 +1305,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
          * The PHY may be busy with some initial calibration and whatnot,
          * so the power state can take a while to actually change.
          */
-        if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
-                WARN(phy_status != tmp,
-                     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
-                     tmp, phy_status, dev_priv->chv_phy_control);
+        if (intel_wait_for_register(dev_priv,
+                                    DISPLAY_PHY_STATUS,
+                                    phy_status_mask,
+                                    phy_status,
+                                    10))
+                DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+                          I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
+                          phy_status, dev_priv->chv_phy_control);
 }
 
 #undef BITS_SET
@@ -1324,7 +1340,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
         vlv_set_power_well(dev_priv, power_well, true);
 
         /* Poll for phypwrgood signal */
-        if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+        if (intel_wait_for_register(dev_priv,
+                                    DISPLAY_PHY_STATUS,
+                                    PHY_POWERGOOD(phy),
+                                    PHY_POWERGOOD(phy),
+                                    1))
                 DRM_ERROR("Display PHY %d is not power up\n", phy);
 
         mutex_lock(&dev_priv->sb_lock);
@@ -2255,7 +2275,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-        struct device *device = &dev_priv->dev->pdev->dev;
+        struct device *device = &dev_priv->drm.pdev->dev;
 
         /*
          * The i915.ko module is still not prepared to be loaded when
@@ -2556,7 +2576,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 {
-        struct drm_device *dev = dev_priv->dev;
+        struct drm_device *dev = &dev_priv->drm;
         struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
         power_domains->initializing = true;
@@ -2618,7 +2638,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-        struct drm_device *dev = dev_priv->dev;
+        struct drm_device *dev = &dev_priv->drm;
         struct device *device = &dev->pdev->dev;
 
         pm_runtime_get_sync(device);
@@ -2639,7 +2659,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  */
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
-        struct drm_device *dev = dev_priv->dev;
+        struct drm_device *dev = &dev_priv->drm;
         struct device *device = &dev->pdev->dev;
 
         if (IS_ENABLED(CONFIG_PM)) {
@@ -2681,7 +2701,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
-        struct drm_device *dev = dev_priv->dev;
+        struct drm_device *dev = &dev_priv->drm;
         struct device *device = &dev->pdev->dev;
 
         assert_rpm_wakelock_held(dev_priv);
@@ -2700,7 +2720,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
-        struct drm_device *dev = dev_priv->dev;
+        struct drm_device *dev = &dev_priv->drm;
         struct device *device = &dev->pdev->dev;
 
         assert_rpm_wakelock_held(dev_priv);
@@ -2723,7 +2743,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
-        struct drm_device *dev = dev_priv->dev;
+        struct drm_device *dev = &dev_priv->drm;
         struct device *device = &dev->pdev->dev;
 
         pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
@@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 {
         struct drm_device *dev = intel_sdvo->base.base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         u32 bval = val, cval = val;
         int i;
 
@@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 {
         struct drm_device *dev = intel_encoder->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
         const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
         struct drm_display_mode *mode = &crtc->config->base.mode;
@@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
                                     enum pipe *pipe)
 {
         struct drm_device *dev = encoder->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
         u16 active_outputs = 0;
         u32 tmp;
@@ -1353,7 +1353,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
                                   struct intel_crtc_state *pipe_config)
 {
         struct drm_device *dev = encoder->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
         struct intel_sdvo_dtd dtd;
         int encoder_pixel_multiplier = 0;
@@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 
 static void intel_disable_sdvo(struct intel_encoder *encoder)
 {
-        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
         u32 temp;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
         temp &= ~SDVO_ENABLE;
         intel_sdvo_write_sdvox(intel_sdvo, temp);
 
-        intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+        intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
         intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
         intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 }
@@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder)
 static void intel_enable_sdvo(struct intel_encoder *encoder)
 {
         struct drm_device *dev = encoder->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
         u32 temp;
@@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector)
 static struct edid *
 intel_sdvo_get_analog_edid(struct drm_connector *connector)
 {
-        struct drm_i915_private *dev_priv = connector->dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(connector->dev);
 
         return drm_get_edid(connector,
                             intel_gmbus_get_adapter(dev_priv,
@@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
         struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-        struct drm_i915_private *dev_priv = connector->dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(connector->dev);
         struct drm_display_mode *newmode;
 
         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 {
         struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
         struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-        struct drm_i915_private *dev_priv = connector->dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(connector->dev);
         uint16_t temp_value;
         uint8_t cmd;
         int ret;
@@ -2177,6 +2177,21 @@ done:
 #undef CHECK_PROPERTY
 }
 
+static int
+intel_sdvo_connector_register(struct drm_connector *connector)
+{
+        struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+        int ret;
+
+        ret = intel_connector_register(connector);
+        if (ret)
+                return ret;
+
+        return sysfs_create_link(&connector->kdev->kobj,
+                                 &sdvo->ddc.dev.kobj,
+                                 sdvo->ddc.dev.kobj.name);
+}
+
 static void
 intel_sdvo_connector_unregister(struct drm_connector *connector)
 {
@@ -2193,6 +2208,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
         .fill_modes = drm_helper_probe_single_connector_modes,
         .set_property = intel_sdvo_set_property,
         .atomic_get_property = intel_connector_atomic_get_property,
+        .late_register = intel_sdvo_connector_register,
         .early_unregister = intel_sdvo_connector_unregister,
         .destroy = intel_sdvo_destroy,
         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2322,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 static u8
 intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct sdvo_device_mapping *my_mapping, *other_mapping;
 
         if (sdvo->port == PORT_B) {
@@ -2380,24 +2396,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
         connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
 
         intel_connector_attach_encoder(&connector->base, &encoder->base);
-        ret = drm_connector_register(drm_connector);
-        if (ret < 0)
-                goto err1;
 
-        ret = sysfs_create_link(&drm_connector->kdev->kobj,
-                                &encoder->ddc.dev.kobj,
-                                encoder->ddc.dev.kobj.name);
-        if (ret < 0)
-                goto err2;
-
-        return 0;
-
-err2:
-        drm_connector_unregister(drm_connector);
-err1:
-        drm_connector_cleanup(drm_connector);
-
-        return ret;
+        return 0;
 }
 
 static void
@@ -2524,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
         return true;
 
 err:
-        drm_connector_unregister(connector);
         intel_sdvo_destroy(connector);
         return false;
 }
@@ -2603,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
         return true;
 
 err:
-        drm_connector_unregister(connector);
         intel_sdvo_destroy(connector);
         return false;
 }
@@ -2954,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
 bool intel_sdvo_init(struct drm_device *dev,
                      i915_reg_t sdvo_reg, enum port port)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_encoder *intel_encoder;
         struct intel_sdvo *intel_sdvo;
         int i;
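[Review note: the connector hunks above move userspace registration to the
late_register/early_unregister model: drm_dev_register() now invokes
.late_register on each connector once the device is visible to userspace,
so intel_sdvo_connector_init() no longer calls drm_connector_register()
itself, and the DDC sysfs symlink is created in the same hook. The error
paths can correspondingly drop drm_connector_unregister(), since a
connector that failed init was never registered.]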
@@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 
         WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-        if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+        if (intel_wait_for_register(dev_priv,
+                                    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+                                    5)) {
                 DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
                                  is_read ? "read" : "write");
                 return -EAGAIN;
@@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
         I915_WRITE(VLV_IOSF_DATA, *val);
         I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
 
-        if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+        if (intel_wait_for_register(dev_priv,
+                                    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+                                    5)) {
                 DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
                                  is_read ? "read" : "write");
                 return -ETIMEDOUT;
@@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
         u32 value = 0;
         WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-        if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                     100)) {
+        if (intel_wait_for_register(dev_priv,
+                                    SBI_CTL_STAT, SBI_BUSY, 0,
+                                    100)) {
                 DRM_ERROR("timeout waiting for SBI to become ready\n");
                 return 0;
         }
@@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
                 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
         I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
-        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-                     100)) {
+        if (intel_wait_for_register(dev_priv,
+                                    SBI_CTL_STAT,
+                                    SBI_BUSY | SBI_RESPONSE_FAIL,
+                                    0,
+                                    100)) {
                 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
                 return 0;
         }
@@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 
         WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-        if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                     100)) {
+        if (intel_wait_for_register(dev_priv,
+                                    SBI_CTL_STAT, SBI_BUSY, 0,
+                                    100)) {
                 DRM_ERROR("timeout waiting for SBI to become ready\n");
                 return;
         }
@@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
         I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
-        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-                     100)) {
+        if (intel_wait_for_register(dev_priv,
+                                    SBI_CTL_STAT,
+                                    SBI_BUSY | SBI_RESPONSE_FAIL,
+                                    0,
+                                    100)) {
                 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
                 return;
         }
@@ -199,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane,
                  const struct intel_plane_state *plane_state)
 {
         struct drm_device *dev = drm_plane->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_plane *intel_plane = to_intel_plane(drm_plane);
         struct drm_framebuffer *fb = plane_state->base.fb;
         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -303,7 +303,7 @@ static void
 skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
         struct drm_device *dev = dplane->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_plane *intel_plane = to_intel_plane(dplane);
         const int pipe = intel_plane->pipe;
         const int plane = intel_plane->plane + 1;
@@ -317,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 static void
 chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
 {
-        struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
         int plane = intel_plane->plane;
 
         /* Seems RGB data bypasses the CSC always */
@@ -359,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane,
                  const struct intel_plane_state *plane_state)
 {
         struct drm_device *dev = dplane->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_plane *intel_plane = to_intel_plane(dplane);
         struct drm_framebuffer *fb = plane_state->base.fb;
         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -485,7 +485,7 @@ static void
 vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
         struct drm_device *dev = dplane->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_plane *intel_plane = to_intel_plane(dplane);
         int pipe = intel_plane->pipe;
         int plane = intel_plane->plane;
@@ -502,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
                  const struct intel_plane_state *plane_state)
 {
         struct drm_device *dev = plane->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_plane *intel_plane = to_intel_plane(plane);
         struct drm_framebuffer *fb = plane_state->base.fb;
         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -624,7 +624,7 @@ static void
 ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
         struct drm_device *dev = plane->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_plane *intel_plane = to_intel_plane(plane);
         int pipe = intel_plane->pipe;
 
@@ -643,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane,
                  const struct intel_plane_state *plane_state)
 {
         struct drm_device *dev = plane->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_plane *intel_plane = to_intel_plane(plane);
         struct drm_framebuffer *fb = plane_state->base.fb;
         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -753,7 +753,7 @@ static void
 ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
         struct drm_device *dev = plane->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_plane *intel_plane = to_intel_plane(plane);
         int pipe = intel_plane->pipe;
 
@@ -826,7 +826,7 @@ static bool
 intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
         struct drm_device *dev = encoder->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         u32 tmp = I915_READ(TV_CTL);
 
         if (!(tmp & TV_ENC_ENABLE))
@@ -841,7 +841,7 @@ static void
 intel_enable_tv(struct intel_encoder *encoder)
 {
         struct drm_device *dev = encoder->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
 
         /* Prevents vblank waits from timing out in intel_tv_detect_type() */
         intel_wait_for_vblank(encoder->base.dev,
@@ -854,7 +854,7 @@ static void
 intel_disable_tv(struct intel_encoder *encoder)
 {
         struct drm_device *dev = encoder->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
 
         I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
 }
@@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
 static void intel_tv_pre_enable(struct intel_encoder *encoder)
 {
         struct drm_device *dev = encoder->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
         struct intel_tv *intel_tv = enc_to_tv(encoder);
         const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
         struct drm_crtc *crtc = connector->state->crtc;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
         struct drm_device *dev = connector->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         u32 tv_ctl, save_tv_ctl;
         u32 tv_dac, save_tv_dac;
         int type;
@@ -1501,6 +1501,7 @@ out:
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
         .dpms = drm_atomic_helper_connector_dpms,
         .detect = intel_tv_detect,
+        .late_register = intel_connector_register,
         .early_unregister = intel_connector_unregister,
         .destroy = intel_tv_destroy,
         .set_property = intel_tv_set_property,
@@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 void
 intel_tv_init(struct drm_device *dev)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct drm_connector *connector;
         struct intel_tv *intel_tv;
         struct intel_encoder *intel_encoder;
@@ -1641,5 +1642,4 @@ intel_tv_init(struct drm_device *dev)
         drm_object_attach_property(&connector->base,
                         dev->mode_config.tv_bottom_margin_property,
                         intel_tv->margin[TV_MARGIN_BOTTOM]);
-        drm_connector_register(connector);
 }
@@ -1299,9 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
                 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
+                spin_lock_irq(&dev_priv->uncore.lock);
                 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
+                spin_unlock_irq(&dev_priv->uncore.lock);
 
                 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                         DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1407,7 +1409,7 @@ static const struct register_whitelist {
 int i915_reg_read_ioctl(struct drm_device *dev,
                         void *data, struct drm_file *file)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct drm_i915_reg_read *reg = data;
         struct register_whitelist const *entry = whitelist;
         unsigned size;
@@ -1469,7 +1471,7 @@ static int i915_reset_complete(struct pci_dev *pdev)
 
 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-        struct pci_dev *pdev = dev_priv->dev->pdev;
+        struct pci_dev *pdev = dev_priv->drm.pdev;
 
         /* assert reset for at least 20 usec */
         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
@@ -1488,14 +1490,14 @@ static int g4x_reset_complete(struct pci_dev *pdev)
 
 static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-        struct pci_dev *pdev = dev_priv->dev->pdev;
+        struct pci_dev *pdev = dev_priv->drm.pdev;
         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
         return wait_for(g4x_reset_complete(pdev), 500);
 }
 
 static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-        struct pci_dev *pdev = dev_priv->dev->pdev;
+        struct pci_dev *pdev = dev_priv->drm.pdev;
         int ret;
 
         pci_write_config_byte(pdev, I915_GDRST,
@@ -1530,15 +1532,17 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 
         I915_WRITE(ILK_GDSR,
                    ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-        ret = wait_for((I915_READ(ILK_GDSR) &
-                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
+        ret = intel_wait_for_register(dev_priv,
+                                      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+                                      500);
         if (ret)
                 return ret;
 
         I915_WRITE(ILK_GDSR,
                    ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-        ret = wait_for((I915_READ(ILK_GDSR) &
-                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
+        ret = intel_wait_for_register(dev_priv,
                                      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+                                      500);
         if (ret)
                 return ret;
 
@@ -1551,20 +1555,16 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
                                 u32 hw_domain_mask)
 {
-        int ret;
-
         /* GEN6_GDRST is not in the gt power well, no need to check
          * for fifo space for the write or forcewake the chip for
          * the read
          */
         __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
 
-#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
         /* Spin waiting for the device to ack the reset requests */
-        ret = wait_for(ACKED, 500);
-#undef ACKED
-
-        return ret;
+        return intel_wait_for_register_fw(dev_priv,
                                          GEN6_GDRST, hw_domain_mask, 0,
+                                          500);
 }
 
 /**
@@ -1609,13 +1609,74 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
         return ret;
 }
 
-static int wait_for_register_fw(struct drm_i915_private *dev_priv,
-                                i915_reg_t reg,
-                                const u32 mask,
-                                const u32 value,
-                                const unsigned long timeout_ms)
+/**
+ * intel_wait_for_register_fw - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ *   (I915_READ_FW(@reg) & @mask) == @value
+ * Otherwise, the wait will time out after @timeout_ms milliseconds.
+ *
+ * Note that this routine assumes the caller holds forcewake asserted; it
+ * is not suitable for very long waits. See intel_wait_for_register() if
+ * you wish to wait without holding forcewake for the duration (i.e. you
+ * expect the wait to be slow).
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+                               i915_reg_t reg,
+                               const u32 mask,
+                               const u32 value,
+                               const unsigned long timeout_ms)
 {
-        return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
+#define done ((I915_READ_FW(reg) & mask) == value)
+        int ret = wait_for_us(done, 2);
+        if (ret)
+                ret = wait_for(done, timeout_ms);
+        return ret;
+#undef done
+}
+
+/**
+ * intel_wait_for_register - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ *   (I915_READ(@reg) & @mask) == @value
+ * Otherwise, the wait will time out after @timeout_ms milliseconds.
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+                            i915_reg_t reg,
+                            const u32 mask,
+                            const u32 value,
+                            const unsigned long timeout_ms)
+{
+        unsigned fw =
+                intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+        int ret;
+
+        intel_uncore_forcewake_get(dev_priv, fw);
+        ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
+        intel_uncore_forcewake_put(dev_priv, fw);
+        if (ret)
+                ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
+                               timeout_ms);
+
+        return ret;
 }
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
@@ -1626,11 +1687,11 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
         I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
                       _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
 
-        ret = wait_for_register_fw(dev_priv,
-                                   RING_RESET_CTL(engine->mmio_base),
-                                   RESET_CTL_READY_TO_RESET,
-                                   RESET_CTL_READY_TO_RESET,
-                                   700);
+        ret = intel_wait_for_register_fw(dev_priv,
                                         RING_RESET_CTL(engine->mmio_base),
+                                         RESET_CTL_READY_TO_RESET,
+                                         RESET_CTL_READY_TO_RESET,
+                                         700);
         if (ret)
                 DRM_ERROR("%s: reset request timeout\n", engine->name);
 
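[Review note: both helpers implement a fast-path/slow-path split: an atomic
busy-wait of up to 2 us via wait_for_us(), then a sleeping wait_for() up to
the caller's timeout. intel_wait_for_register_fw() assumes forcewake is
already held (hence I915_READ_FW throughout), so it should only be used for
short waits; intel_wait_for_register() brackets its own fast path with a
forcewake get/put and may sleep. A usage sketch, modelled on the SBI
conversion earlier in this merge:]

        /* Sketch: poll a status register without holding forcewake. */
        if (intel_wait_for_register(dev_priv,
                                    SBI_CTL_STAT, SBI_BUSY, 0, /* idle */
                                    100)) /* up to 100 ms, may sleep */
                DRM_ERROR("timeout waiting for SBI to become ready\n");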
@@ -309,6 +309,7 @@
         INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
         INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
         INTEL_VGA_DEVICE(0x5902, info), /* DT  GT1 */ \
+        INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
         INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
         INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
 
@@ -322,15 +323,12 @@
         INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
 
 #define INTEL_KBL_GT3_IDS(info) \
+        INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
         INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
-        INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
-        INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+        INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
 
 #define INTEL_KBL_GT4_IDS(info) \
-        INTEL_VGA_DEVICE(0x5932, info), /* DT  GT4 */ \
-        INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
-        INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
-        INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
+        INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
 #define INTEL_KBL_IDS(info) \
         INTEL_KBL_GT1_IDS(info), \
|
|||
#define I915_PARAM_HAS_GPU_RESET 35
|
||||
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
|
||||
#define I915_PARAM_HAS_EXEC_SOFTPIN 37
|
||||
#define I915_PARAM_HAS_POOLED_EU 38
|
||||
#define I915_PARAM_MIN_EU_IN_POOL 39
|
||||
|
||||
typedef struct drm_i915_getparam {
|
||||
__s32 param;
|
||||
|
@ -1171,6 +1173,7 @@ struct drm_i915_gem_context_param {
|
|||
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
|
||||
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
|
||||
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
|
||||
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
|
||||
__u64 value;
|
||||
};
|
||||
|
||||
|
|
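[Review note: userspace probes the new parameters with
DRM_IOCTL_I915_GETPARAM as usual. A minimal sketch, assuming libdrm's copy
of i915_drm.h; on kernels predating this merge the ioctl fails and the
feature should be treated as absent:]

#include <sys/ioctl.h>
#include <i915_drm.h> /* libdrm copy of this header */

static int i915_getparam(int fd, int param, int *value)
{
        struct drm_i915_getparam gp = { .param = param, .value = value };

        return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp); /* 0 on success */
}

/* e.g. probe bxt pooled-EU support and the minimum EUs in the pool: */
int has_pooled_eu = 0, min_eu_in_pool = 0;

if (i915_getparam(fd, I915_PARAM_HAS_POOLED_EU, &has_pooled_eu) == 0 &&
    has_pooled_eu)
        i915_getparam(fd, I915_PARAM_MIN_EU_IN_POOL, &min_eu_in_pool);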