/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/console.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_drv.h>

#include "display/intel_fbdev.h"

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_globals.h"
#include "i915_selftest.h"

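/*
 * Convenience initializers for the intel_device_info tables below:
 * PLATFORM() fills in .platform, GEN() fills in .gen together with the
 * matching bit in .gen_mask.
 */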
#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)

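/*
 * Per-pipe and per-transcoder register base offsets. Each platform variant
 * below picks the table matching its display layout (single pipe, two or
 * three pipes, the eDP transcoder on HSW+, CHV's pipe C at its own offset).
 */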
#define I845_PIPE_OFFSETS \
	.pipe_offsets = { \
		[TRANSCODER_A] = PIPE_A_OFFSET, \
	}, \
	.trans_offsets = { \
		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
	}

#define I9XX_PIPE_OFFSETS \
	.pipe_offsets = { \
		[TRANSCODER_A] = PIPE_A_OFFSET, \
		[TRANSCODER_B] = PIPE_B_OFFSET, \
	}, \
	.trans_offsets = { \
		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
	}

#define IVB_PIPE_OFFSETS \
	.pipe_offsets = { \
		[TRANSCODER_A] = PIPE_A_OFFSET, \
		[TRANSCODER_B] = PIPE_B_OFFSET, \
		[TRANSCODER_C] = PIPE_C_OFFSET, \
	}, \
	.trans_offsets = { \
		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
		[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
	}

#define HSW_PIPE_OFFSETS \
	.pipe_offsets = { \
		[TRANSCODER_A] = PIPE_A_OFFSET, \
		[TRANSCODER_B] = PIPE_B_OFFSET, \
		[TRANSCODER_C] = PIPE_C_OFFSET, \
		[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
	}, \
	.trans_offsets = { \
		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
		[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
		[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
	}

#define CHV_PIPE_OFFSETS \
	.pipe_offsets = { \
		[TRANSCODER_A] = PIPE_A_OFFSET, \
		[TRANSCODER_B] = PIPE_B_OFFSET, \
		[TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
	}, \
	.trans_offsets = { \
		[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
		[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
		[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
	}

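/* Cursor register base offsets, likewise keyed by pipe. */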
#define I845_CURSOR_OFFSETS \
	.cursor_offsets = { \
		[PIPE_A] = CURSOR_A_OFFSET, \
	}

#define I9XX_CURSOR_OFFSETS \
	.cursor_offsets = { \
		[PIPE_A] = CURSOR_A_OFFSET, \
		[PIPE_B] = CURSOR_B_OFFSET, \
	}

#define CHV_CURSOR_OFFSETS \
	.cursor_offsets = { \
		[PIPE_A] = CURSOR_A_OFFSET, \
		[PIPE_B] = CURSOR_B_OFFSET, \
		[PIPE_C] = CHV_CURSOR_C_OFFSET, \
	}

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { \
		[PIPE_A] = CURSOR_A_OFFSET, \
		[PIPE_B] = IVB_CURSOR_B_OFFSET, \
		[PIPE_C] = IVB_CURSOR_C_OFFSET, \
	}

#define TGL_CURSOR_OFFSETS \
	.cursor_offsets = { \
		[PIPE_A] = CURSOR_A_OFFSET, \
		[PIPE_B] = IVB_CURSOR_B_OFFSET, \
		[PIPE_C] = IVB_CURSOR_C_OFFSET, \
		[PIPE_D] = TGL_CURSOR_D_OFFSET, \
	}

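/*
 * Color management capabilities: gamma/degamma LUT sizes and the DRM LUT
 * validation tests (DRM_COLOR_LUT_*) that apply to each platform.
 */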
#define I9XX_COLORS \
	.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
	.color = { .gamma_lut_size = 129, \
		   .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
	}
#define ILK_COLORS \
	.color = { .gamma_lut_size = 1024 }
#define IVB_COLORS \
	.color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
#define CHV_COLORS \
	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
		   .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
		   .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
	}
#define GLK_COLORS \
	.color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
		   .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
					DRM_COLOR_LUT_EQUAL_CHANNELS, \
	}

/* Keep in gen based order, and chronological order within a gen */

#define GEN_DEFAULT_PAGE_SIZES \
	.page_sizes = I915_GTT_PAGE_SIZE_4K

#define GEN_DEFAULT_REGIONS \
	.memory_regions = REGION_SMEM | REGION_STOLEN

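/*
 * The *_FEATURES blocks collect the capabilities common to a generation;
 * each device info struct starts from one of them and then overrides
 * individual fields. Because these are designated initializers, the last
 * value specified for a field wins.
 */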
#define I830_FEATURES \
	GEN(2), \
	.is_mobile = 1, \
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
	.display.has_overlay = 1, \
	.display.cursor_needs_physical = 1, \
	.display.overlay_needs_physical = 1, \
	.display.has_gmch = 1, \
	.gpu_reset_clobbers_display = true, \
	.hws_needs_physical = 1, \
	.unfenced_needs_alignment = 1, \
	.engine_mask = BIT(RCS0), \
	.has_snoop = true, \
	.has_coherent_ggtt = false, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	I9XX_COLORS, \
	GEN_DEFAULT_PAGE_SIZES, \
	GEN_DEFAULT_REGIONS

#define I845_FEATURES \
	GEN(2), \
	.pipe_mask = BIT(PIPE_A), \
	.display.has_overlay = 1, \
	.display.overlay_needs_physical = 1, \
	.display.has_gmch = 1, \
	.gpu_reset_clobbers_display = true, \
	.hws_needs_physical = 1, \
	.unfenced_needs_alignment = 1, \
	.engine_mask = BIT(RCS0), \
	.has_snoop = true, \
	.has_coherent_ggtt = false, \
	I845_PIPE_OFFSETS, \
	I845_CURSOR_OFFSETS, \
	I9XX_COLORS, \
	GEN_DEFAULT_PAGE_SIZES, \
	GEN_DEFAULT_REGIONS

static const struct intel_device_info i830_info = {
	I830_FEATURES,
	PLATFORM(INTEL_I830),
};

static const struct intel_device_info i845g_info = {
	I845_FEATURES,
	PLATFORM(INTEL_I845G),
};

static const struct intel_device_info i85x_info = {
	I830_FEATURES,
	PLATFORM(INTEL_I85X),
	.display.has_fbc = 1,
};

static const struct intel_device_info i865g_info = {
	I845_FEATURES,
	PLATFORM(INTEL_I865G),
};

#define GEN3_FEATURES \
	GEN(3), \
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
	.display.has_gmch = 1, \
	.gpu_reset_clobbers_display = true, \
	.engine_mask = BIT(RCS0), \
	.has_snoop = true, \
	.has_coherent_ggtt = true, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	I9XX_COLORS, \
	GEN_DEFAULT_PAGE_SIZES, \
	GEN_DEFAULT_REGIONS

static const struct intel_device_info i915g_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_I915G),
	.has_coherent_ggtt = false,
	.display.cursor_needs_physical = 1,
	.display.has_overlay = 1,
	.display.overlay_needs_physical = 1,
	.hws_needs_physical = 1,
	.unfenced_needs_alignment = 1,
};

static const struct intel_device_info i915gm_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_I915GM),
	.is_mobile = 1,
	.display.cursor_needs_physical = 1,
	.display.has_overlay = 1,
	.display.overlay_needs_physical = 1,
	.display.supports_tv = 1,
	.display.has_fbc = 1,
	.hws_needs_physical = 1,
	.unfenced_needs_alignment = 1,
};

static const struct intel_device_info i945g_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_I945G),
	.display.has_hotplug = 1,
	.display.cursor_needs_physical = 1,
	.display.has_overlay = 1,
	.display.overlay_needs_physical = 1,
	.hws_needs_physical = 1,
	.unfenced_needs_alignment = 1,
};

static const struct intel_device_info i945gm_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_I945GM),
	.is_mobile = 1,
	.display.has_hotplug = 1,
	.display.cursor_needs_physical = 1,
	.display.has_overlay = 1,
	.display.overlay_needs_physical = 1,
	.display.supports_tv = 1,
	.display.has_fbc = 1,
	.hws_needs_physical = 1,
	.unfenced_needs_alignment = 1,
};

static const struct intel_device_info g33_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_G33),
	.display.has_hotplug = 1,
	.display.has_overlay = 1,
};

static const struct intel_device_info pnv_g_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_PINEVIEW),
	.display.has_hotplug = 1,
	.display.has_overlay = 1,
};

static const struct intel_device_info pnv_m_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_PINEVIEW),
	.is_mobile = 1,
	.display.has_hotplug = 1,
	.display.has_overlay = 1,
};

#define GEN4_FEATURES \
	GEN(4), \
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
	.display.has_hotplug = 1, \
	.display.has_gmch = 1, \
	.gpu_reset_clobbers_display = true, \
	.engine_mask = BIT(RCS0), \
	.has_snoop = true, \
	.has_coherent_ggtt = true, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	I965_COLORS, \
	GEN_DEFAULT_PAGE_SIZES, \
	GEN_DEFAULT_REGIONS

static const struct intel_device_info i965g_info = {
	GEN4_FEATURES,
	PLATFORM(INTEL_I965G),
	.display.has_overlay = 1,
	.hws_needs_physical = 1,
	.has_snoop = false,
};

static const struct intel_device_info i965gm_info = {
	GEN4_FEATURES,
	PLATFORM(INTEL_I965GM),
	.is_mobile = 1,
	.display.has_fbc = 1,
	.display.has_overlay = 1,
	.display.supports_tv = 1,
	.hws_needs_physical = 1,
	.has_snoop = false,
};

static const struct intel_device_info g45_info = {
	GEN4_FEATURES,
	PLATFORM(INTEL_G45),
	.engine_mask = BIT(RCS0) | BIT(VCS0),
	.gpu_reset_clobbers_display = false,
};

static const struct intel_device_info gm45_info = {
	GEN4_FEATURES,
	PLATFORM(INTEL_GM45),
	.is_mobile = 1,
	.display.has_fbc = 1,
	.display.supports_tv = 1,
	.engine_mask = BIT(RCS0) | BIT(VCS0),
	.gpu_reset_clobbers_display = false,
};

#define GEN5_FEATURES \
	GEN(5), \
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
	.display.has_hotplug = 1, \
	.engine_mask = BIT(RCS0) | BIT(VCS0), \
	.has_snoop = true, \
	.has_coherent_ggtt = true, \
	/* ilk does support rc6, but we do not implement [power] contexts */ \
	.has_rc6 = 0, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	ILK_COLORS, \
	GEN_DEFAULT_PAGE_SIZES, \
	GEN_DEFAULT_REGIONS

static const struct intel_device_info ilk_d_info = {
	GEN5_FEATURES,
	PLATFORM(INTEL_IRONLAKE),
};

static const struct intel_device_info ilk_m_info = {
	GEN5_FEATURES,
	PLATFORM(INTEL_IRONLAKE),
	.is_mobile = 1,
	.display.has_fbc = 1,
};

#define GEN6_FEATURES \
	GEN(6), \
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
	.display.has_hotplug = 1, \
	.display.has_fbc = 1, \
	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
	.has_coherent_ggtt = true, \
	.has_llc = 1, \
	.has_rc6 = 1, \
	.has_rc6p = 1, \
	.has_rps = true, \
	.ppgtt_type = INTEL_PPGTT_ALIASING, \
	.ppgtt_size = 31, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	ILK_COLORS, \
	GEN_DEFAULT_PAGE_SIZES, \
	GEN_DEFAULT_REGIONS

#define SNB_D_PLATFORM \
	GEN6_FEATURES, \
	PLATFORM(INTEL_SANDYBRIDGE)

static const struct intel_device_info snb_d_gt1_info = {
	SNB_D_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info snb_d_gt2_info = {
	SNB_D_PLATFORM,
	.gt = 2,
};

#define SNB_M_PLATFORM \
	GEN6_FEATURES, \
	PLATFORM(INTEL_SANDYBRIDGE), \
	.is_mobile = 1

static const struct intel_device_info snb_m_gt1_info = {
	SNB_M_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info snb_m_gt2_info = {
	SNB_M_PLATFORM,
	.gt = 2,
};

#define GEN7_FEATURES \
	GEN(7), \
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
	.display.has_hotplug = 1, \
	.display.has_fbc = 1, \
	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
	.has_coherent_ggtt = true, \
	.has_llc = 1, \
	.has_rc6 = 1, \
	.has_rc6p = 1, \
	.has_rps = true, \
	.ppgtt_type = INTEL_PPGTT_ALIASING, \
	.ppgtt_size = 31, \
	IVB_PIPE_OFFSETS, \
	IVB_CURSOR_OFFSETS, \
	IVB_COLORS, \
	GEN_DEFAULT_PAGE_SIZES, \
	GEN_DEFAULT_REGIONS

#define IVB_D_PLATFORM \
	GEN7_FEATURES, \
	PLATFORM(INTEL_IVYBRIDGE), \
	.has_l3_dpf = 1

static const struct intel_device_info ivb_d_gt1_info = {
	IVB_D_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info ivb_d_gt2_info = {
	IVB_D_PLATFORM,
	.gt = 2,
};

#define IVB_M_PLATFORM \
	GEN7_FEATURES, \
	PLATFORM(INTEL_IVYBRIDGE), \
	.is_mobile = 1, \
	.has_l3_dpf = 1

static const struct intel_device_info ivb_m_gt1_info = {
	IVB_M_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info ivb_m_gt2_info = {
	IVB_M_PLATFORM,
	.gt = 2,
};

static const struct intel_device_info ivb_q_info = {
	GEN7_FEATURES,
	PLATFORM(INTEL_IVYBRIDGE),
	.gt = 2,
	.pipe_mask = 0, /* legal, last one wins */
	.has_l3_dpf = 1,
};

static const struct intel_device_info vlv_info = {
	PLATFORM(INTEL_VALLEYVIEW),
	GEN(7),
	.is_lp = 1,
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
	.has_runtime_pm = 1,
	.has_rc6 = 1,
	.has_rps = true,
	.display.has_gmch = 1,
	.display.has_hotplug = 1,
	.ppgtt_type = INTEL_PPGTT_ALIASING,
	.ppgtt_size = 31,
	.has_snoop = true,
	.has_coherent_ggtt = false,
	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
	.display_mmio_offset = VLV_DISPLAY_BASE,
	I9XX_PIPE_OFFSETS,
	I9XX_CURSOR_OFFSETS,
	I965_COLORS,
	GEN_DEFAULT_PAGE_SIZES,
	GEN_DEFAULT_REGIONS,
};

#define G75_FEATURES  \
	GEN7_FEATURES, \
	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
	.display.has_ddi = 1, \
	.has_fpga_dbg = 1, \
	.display.has_psr = 1, \
	.display.has_dp_mst = 1, \
	.has_rc6p = 0 /* RC6p removed-by HSW */, \
	HSW_PIPE_OFFSETS, \
	.has_runtime_pm = 1

#define HSW_PLATFORM \
	G75_FEATURES, \
	PLATFORM(INTEL_HASWELL), \
	.has_l3_dpf = 1

static const struct intel_device_info hsw_gt1_info = {
	HSW_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info hsw_gt2_info = {
	HSW_PLATFORM,
	.gt = 2,
};

static const struct intel_device_info hsw_gt3_info = {
	HSW_PLATFORM,
	.gt = 3,
};

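/*
 * Gen8 extends G75_FEATURES with logical ring contexts, full 48-bit PPGTT,
 * 64-bit relocations and per-engine reset.
 */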
#define GEN8_FEATURES \
	G75_FEATURES, \
	GEN(8), \
	.has_logical_ring_contexts = 1, \
	.ppgtt_type = INTEL_PPGTT_FULL, \
	.ppgtt_size = 48, \
	.has_64bit_reloc = 1, \
	.has_reset_engine = 1

#define BDW_PLATFORM \
	GEN8_FEATURES, \
	PLATFORM(INTEL_BROADWELL)

static const struct intel_device_info bdw_gt1_info = {
	BDW_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info bdw_gt2_info = {
	BDW_PLATFORM,
	.gt = 2,
};

static const struct intel_device_info bdw_rsvd_info = {
	BDW_PLATFORM,
	.gt = 3,
	/* According to the device ID those devices are GT3, they were
	 * previously treated as not GT3, keep it like that.
	 */
};

static const struct intel_device_info bdw_gt3_info = {
	BDW_PLATFORM,
	.gt = 3,
	.engine_mask =
		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};

static const struct intel_device_info chv_info = {
	PLATFORM(INTEL_CHERRYVIEW),
	GEN(8),
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
	.display.has_hotplug = 1,
	.is_lp = 1,
	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
	.has_64bit_reloc = 1,
	.has_runtime_pm = 1,
	.has_rc6 = 1,
	.has_rps = true,
	.has_logical_ring_contexts = 1,
	.display.has_gmch = 1,
	.ppgtt_type = INTEL_PPGTT_ALIASING,
	.ppgtt_size = 32,
	.has_reset_engine = 1,
	.has_snoop = true,
	.has_coherent_ggtt = false,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	CHV_PIPE_OFFSETS,
	CHV_CURSOR_OFFSETS,
	CHV_COLORS,
	GEN_DEFAULT_PAGE_SIZES,
	GEN_DEFAULT_REGIONS,
};

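/* Gen9 adds 64K GTT page support on top of the default 4K pages. */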
#define GEN9_DEFAULT_PAGE_SIZES \
	.page_sizes = I915_GTT_PAGE_SIZE_4K | \
		      I915_GTT_PAGE_SIZE_64K

#define GEN9_FEATURES \
	GEN8_FEATURES, \
	GEN(9), \
	GEN9_DEFAULT_PAGE_SIZES, \
	.has_logical_ring_preemption = 1, \
	.display.has_csr = 1, \
	.has_gt_uc = 1, \
	.display.has_hdcp = 1, \
	.display.has_ipc = 1, \
	.ddb_size = 896, \
	.num_supported_dbuf_slices = 1

#define SKL_PLATFORM \
	GEN9_FEATURES, \
	PLATFORM(INTEL_SKYLAKE)

static const struct intel_device_info skl_gt1_info = {
	SKL_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info skl_gt2_info = {
	SKL_PLATFORM,
	.gt = 2,
};

#define SKL_GT3_PLUS_PLATFORM \
	SKL_PLATFORM, \
	.engine_mask = \
		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)

static const struct intel_device_info skl_gt3_info = {
	SKL_GT3_PLUS_PLATFORM,
	.gt = 3,
};

static const struct intel_device_info skl_gt4_info = {
	SKL_GT3_PLUS_PLATFORM,
	.gt = 4,
};

#define GEN9_LP_FEATURES \
	GEN(9), \
	.is_lp = 1, \
	.num_supported_dbuf_slices = 1, \
	.display.has_hotplug = 1, \
	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
	.has_64bit_reloc = 1, \
	.display.has_ddi = 1, \
	.has_fpga_dbg = 1, \
	.display.has_fbc = 1, \
	.display.has_hdcp = 1, \
	.display.has_psr = 1, \
	.has_runtime_pm = 1, \
	.display.has_csr = 1, \
	.has_rc6 = 1, \
	.has_rps = true, \
	.display.has_dp_mst = 1, \
	.has_logical_ring_contexts = 1, \
	.has_logical_ring_preemption = 1, \
	.has_gt_uc = 1, \
	.ppgtt_type = INTEL_PPGTT_FULL, \
	.ppgtt_size = 48, \
	.has_reset_engine = 1, \
	.has_snoop = true, \
	.has_coherent_ggtt = false, \
	.display.has_ipc = 1, \
	HSW_PIPE_OFFSETS, \
	IVB_CURSOR_OFFSETS, \
	IVB_COLORS, \
	GEN9_DEFAULT_PAGE_SIZES, \
	GEN_DEFAULT_REGIONS

static const struct intel_device_info bxt_info = {
	GEN9_LP_FEATURES,
	PLATFORM(INTEL_BROXTON),
	.ddb_size = 512,
};

static const struct intel_device_info glk_info = {
	GEN9_LP_FEATURES,
	PLATFORM(INTEL_GEMINILAKE),
	.ddb_size = 1024,
	GLK_COLORS,
};

#define KBL_PLATFORM \
	GEN9_FEATURES, \
	PLATFORM(INTEL_KABYLAKE)

static const struct intel_device_info kbl_gt1_info = {
	KBL_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info kbl_gt2_info = {
	KBL_PLATFORM,
	.gt = 2,
};

static const struct intel_device_info kbl_gt3_info = {
	KBL_PLATFORM,
	.gt = 3,
	.engine_mask =
		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};

#define CFL_PLATFORM \
	GEN9_FEATURES, \
	PLATFORM(INTEL_COFFEELAKE)

static const struct intel_device_info cfl_gt1_info = {
	CFL_PLATFORM,
	.gt = 1,
};

static const struct intel_device_info cfl_gt2_info = {
	CFL_PLATFORM,
	.gt = 2,
};

static const struct intel_device_info cfl_gt3_info = {
	CFL_PLATFORM,
	.gt = 3,
	.engine_mask =
		BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};

#define GEN10_FEATURES \
	GEN9_FEATURES, \
	GEN(10), \
	.ddb_size = 1024, \
	.display.has_dsc = 1, \
	.has_coherent_ggtt = false, \
	GLK_COLORS

2019-12-24 16:40:03 +08:00
|
|
|
static const struct intel_device_info cnl_info = {
|
2017-10-03 14:36:51 +08:00
|
|
|
GEN10_FEATURES,
|
2018-02-15 16:19:30 +08:00
|
|
|
PLATFORM(INTEL_CANNONLAKE),
|
2017-08-31 00:12:05 +08:00
|
|
|
.gt = 2,
|
2017-06-07 04:30:30 +08:00
|
|
|
};
|
|
|
|
|
2019-08-10 03:34:56 +08:00
|
|
|
#define GEN11_DEFAULT_PAGE_SIZES \
|
|
|
|
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
|
|
|
|
I915_GTT_PAGE_SIZE_64K | \
|
|
|
|
I915_GTT_PAGE_SIZE_2M
|
|
|
|
|
2018-01-12 02:00:04 +08:00
|
|
|
#define GEN11_FEATURES \
|
|
|
|
GEN10_FEATURES, \
|
2019-08-10 03:34:56 +08:00
|
|
|
GEN11_DEFAULT_PAGE_SIZES, \
|
2018-11-20 17:23:23 +08:00
|
|
|
.pipe_offsets = { \
|
|
|
|
[TRANSCODER_A] = PIPE_A_OFFSET, \
|
|
|
|
[TRANSCODER_B] = PIPE_B_OFFSET, \
|
|
|
|
[TRANSCODER_C] = PIPE_C_OFFSET, \
|
|
|
|
[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
|
|
|
|
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
|
|
|
|
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
|
|
|
|
}, \
|
|
|
|
.trans_offsets = { \
|
|
|
|
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
|
|
|
|
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
|
|
|
|
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
|
|
|
|
[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
|
|
|
|
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
|
|
|
|
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
|
|
|
|
}, \
|
2018-02-15 16:19:28 +08:00
|
|
|
GEN(11), \
|
2018-01-12 02:00:04 +08:00
|
|
|
.ddb_size = 2048, \
|
drm/i915: Manipulate DBuf slices properly
Start manipulating DBuf slices as a mask rather than as a total
count, as the current approach doesn't give us full control over all
combinations of slices, which we might need (e.g. enabling only S2
can't be expressed by setting enabled_slices=1).
Removed wrong code from intel_get_ddb_size as it doesn't match the
BSpec. For now still just use a single DBuf slice until the proper
algorithm is implemented.
Other minor code refactoring to prepare for the major DBuf assignment
changes to land:
- As the enabled slices are now a mask, we still need a value
  reflecting how many DBuf slices the platform supports; device info
  now contains num_supported_dbuf_slices.
- Removed an unneeded assertion as we are now manipulating slices in
  a more proper way.
v2: Start using enabled_slices in dev_priv
v3: "enabled_slices" is now "enabled_dbuf_slices_mask",
    as this now sits in dev_priv independently.
v4: - Fixed debug print formatting to hex (Matt Roper)
    - Optimized DBuf slice updates to run only if the slice union
      differs from the current configuration (Matt Roper)
    - Made some functions static (Matt Roper)
    - Created a parameterized version of DBUF_CTL to simplify the
      DBuf programming cycle (Matt Roper)
    - Removed the unrequired field from GEN10_FEATURES (Matt Roper)
v5: - Removed the redundant DBuf slice programming helper (Ville Syrjälä)
    - Started using a parameterized loop for hw readout to get the
      slices (Ville Syrjälä)
    - Added back the assertion checking the number of DBuf slices
      enabled after the DC5/6 transition; also added a new assertion,
      as starting from ICL the DMC seems to restore the last DBuf
      power state set rather than powering up all DBuf slices as the
      assertion previously expected (Ville Syrjälä)
v6: - Now using an enum for DBuf slices in this patch (Ville Syrjälä)
    - Removed gen11_assert_dbuf_enabled and put gen9_assert_dbuf_enabled
      back, as we really need a single unified assert here. However,
      always enabling slice 1 is currently enforced by the BSpec, so we
      have to OR the enabled slices mask with 1 to stay consistent with
      the BSpec; that way the assertion can be unified and checked
      against the actual state from the driver rather than some
      hardcoded value (concluded with Ville).
    - Removed the parameterized DBUF_CTL version, to extract it to
      another patch (Ville Syrjälä).
v7: - Removed the unneeded hardcoded return value for older gens from
      intel_enabled_dbuf_slices_mask - this is now handled in a unified
      manner, since device info returns the max DBuf slices as 1 for
      older platforms anyway (Matthew Roper)
    - Now using INTEL_INFO(dev_priv)->num_supported_dbuf_slices instead
      of the intel_dbuf_max_slices function as it is trivial (Matthew Roper)
v8: - Fixed icl_dbuf_disable to still disable all DBuf slices (Ville Syrjälä)
v9: - Renamed _DBUF_CTL_S to DBUF_CTL_S (Ville Syrjälä)
    - Now using the power_domain mutex to protect against a race
      condition which can occur because intel_dbuf_slices_update might
      run in parallel with gen9_dc_off_power_well_enable being called
      from intel_dp_detect, for instance; this triggered the assertion,
      as gen9_assert_dbuf_enabled might preempt when the registers were
      already updated while dev_priv was not.
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200202230630.8975-6-stanislav.lisovskiy@intel.com
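As a rough sketch of the mask-based approach described above (illustrative only; the _EXAMPLE identifiers and helpers below are invented for the example and do not exist in the driver):

/*
 * Illustrative sketch only (not part of this patch): why a slice mask
 * is needed instead of a plain count.
 */
enum dbuf_slice_example {
	DBUF_S1_EXAMPLE = BIT(0),
	DBUF_S2_EXAMPLE = BIT(1),
};

/* "S2 only" is expressible as a mask, but not as enabled_slices=1. */
static inline u8 example_s2_only_mask(void)
{
	return DBUF_S2_EXAMPLE;
}

/*
 * Touch the hardware only when the requested union of slices differs
 * from what is currently enabled, mirroring the optimisation described
 * above.
 */
static inline bool example_dbuf_update_needed(u8 enabled_mask, u8 requested_mask)
{
	return (enabled_mask | requested_mask) != enabled_mask;
}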
2020-02-03 07:06:29 +08:00
|
|
|
.num_supported_dbuf_slices = 2, \
|
2019-02-11 21:50:25 +08:00
|
|
|
.has_logical_ring_elsq = 1, \
|
2019-06-12 14:45:00 +08:00
|
|
|
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
|
2018-01-12 02:00:04 +08:00
|
|
|
|
2019-12-24 16:40:03 +08:00
|
|
|
static const struct intel_device_info icl_info = {
|
2018-01-12 02:00:04 +08:00
|
|
|
GEN11_FEATURES,
|
2018-02-15 16:19:30 +08:00
|
|
|
PLATFORM(INTEL_ICELAKE),
|
2019-03-06 02:03:30 +08:00
|
|
|
.engine_mask =
|
|
|
|
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
|
2018-01-12 02:00:04 +08:00
|
|
|
};
|
|
|
|
|
2019-12-24 16:40:03 +08:00
|
|
|
static const struct intel_device_info ehl_info = {
|
2019-03-23 01:58:42 +08:00
|
|
|
GEN11_FEATURES,
|
2019-03-23 01:58:43 +08:00
|
|
|
PLATFORM(INTEL_ELKHARTLAKE),
|
2019-05-06 21:48:01 +08:00
|
|
|
.require_force_probe = 1,
|
2019-06-15 05:37:49 +08:00
|
|
|
.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
|
2019-03-23 01:58:42 +08:00
|
|
|
.ppgtt_size = 36,
|
|
|
|
};
|
|
|
|
|
2019-07-12 01:30:56 +08:00
|
|
|
#define GEN12_FEATURES \
|
|
|
|
GEN11_FEATURES, \
|
|
|
|
GEN(12), \
|
|
|
|
.pipe_offsets = { \
|
|
|
|
[TRANSCODER_A] = PIPE_A_OFFSET, \
|
|
|
|
[TRANSCODER_B] = PIPE_B_OFFSET, \
|
|
|
|
[TRANSCODER_C] = PIPE_C_OFFSET, \
|
|
|
|
[TRANSCODER_D] = PIPE_D_OFFSET, \
|
|
|
|
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
|
|
|
|
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
|
|
|
|
}, \
|
|
|
|
.trans_offsets = { \
|
|
|
|
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
|
|
|
|
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
|
|
|
|
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
|
|
|
|
[TRANSCODER_D] = TRANSCODER_D_OFFSET, \
|
|
|
|
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
|
|
|
|
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
|
2019-07-31 02:04:06 +08:00
|
|
|
}, \
|
2019-09-24 15:31:52 +08:00
|
|
|
TGL_CURSOR_OFFSETS, \
|
2019-09-20 19:59:29 +08:00
|
|
|
.has_global_mocs = 1, \
|
|
|
|
.display.has_dsb = 1
|
2019-07-12 01:30:56 +08:00
|
|
|
|
2019-12-24 16:40:03 +08:00
|
|
|
static const struct intel_device_info tgl_info = {
|
2019-07-12 01:30:56 +08:00
|
|
|
GEN12_FEATURES,
|
|
|
|
PLATFORM(INTEL_TIGERLAKE),
|
2019-09-12 04:29:08 +08:00
|
|
|
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
|
2019-07-12 01:30:56 +08:00
|
|
|
.require_force_probe = 1,
|
2019-07-12 13:57:06 +08:00
|
|
|
.display.has_modular_fia = 1,
|
2019-07-12 01:30:56 +08:00
|
|
|
.engine_mask =
|
|
|
|
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
|
|
|
|
};
|
|
|
|
|
2019-10-25 03:51:20 +08:00
|
|
|
#define GEN12_DGFX_FEATURES \
|
|
|
|
GEN12_FEATURES, \
|
|
|
|
.is_dgfx = 1
|
|
|
|
|
2018-02-15 16:19:28 +08:00
|
|
|
#undef GEN
|
2018-02-15 16:19:30 +08:00
|
|
|
#undef PLATFORM
|
2018-02-15 16:19:28 +08:00
|
|
|
|
2016-06-24 21:00:26 +08:00
|
|
|
/*
|
|
|
|
 * Make sure any device matches here are ordered from most specific to most
|
|
|
|
* general. For example, since the Quanta match is based on the subsystem
|
|
|
|
* and subvendor IDs, we need it to come before the more general IVB
|
|
|
|
* PCI ID matches, otherwise we'll use the wrong info struct above.
|
|
|
|
*/
|
|
|
|
static const struct pci_device_id pciidlist[] = {
|
2019-12-24 16:40:03 +08:00
|
|
|
INTEL_I830_IDS(&i830_info),
|
|
|
|
INTEL_I845G_IDS(&i845g_info),
|
|
|
|
INTEL_I85X_IDS(&i85x_info),
|
|
|
|
INTEL_I865G_IDS(&i865g_info),
|
|
|
|
INTEL_I915G_IDS(&i915g_info),
|
|
|
|
INTEL_I915GM_IDS(&i915gm_info),
|
|
|
|
INTEL_I945G_IDS(&i945g_info),
|
|
|
|
INTEL_I945GM_IDS(&i945gm_info),
|
|
|
|
INTEL_I965G_IDS(&i965g_info),
|
|
|
|
INTEL_G33_IDS(&g33_info),
|
|
|
|
INTEL_I965GM_IDS(&i965gm_info),
|
|
|
|
INTEL_GM45_IDS(&gm45_info),
|
|
|
|
INTEL_G45_IDS(&g45_info),
|
|
|
|
INTEL_PINEVIEW_G_IDS(&pnv_g_info),
|
|
|
|
INTEL_PINEVIEW_M_IDS(&pnv_m_info),
|
|
|
|
INTEL_IRONLAKE_D_IDS(&ilk_d_info),
|
|
|
|
INTEL_IRONLAKE_M_IDS(&ilk_m_info),
|
|
|
|
INTEL_SNB_D_GT1_IDS(&snb_d_gt1_info),
|
|
|
|
INTEL_SNB_D_GT2_IDS(&snb_d_gt2_info),
|
|
|
|
INTEL_SNB_M_GT1_IDS(&snb_m_gt1_info),
|
|
|
|
INTEL_SNB_M_GT2_IDS(&snb_m_gt2_info),
|
|
|
|
INTEL_IVB_Q_IDS(&ivb_q_info), /* must be first IVB */
|
|
|
|
INTEL_IVB_M_GT1_IDS(&ivb_m_gt1_info),
|
|
|
|
INTEL_IVB_M_GT2_IDS(&ivb_m_gt2_info),
|
|
|
|
INTEL_IVB_D_GT1_IDS(&ivb_d_gt1_info),
|
|
|
|
INTEL_IVB_D_GT2_IDS(&ivb_d_gt2_info),
|
|
|
|
INTEL_HSW_GT1_IDS(&hsw_gt1_info),
|
|
|
|
INTEL_HSW_GT2_IDS(&hsw_gt2_info),
|
|
|
|
INTEL_HSW_GT3_IDS(&hsw_gt3_info),
|
|
|
|
INTEL_VLV_IDS(&vlv_info),
|
|
|
|
INTEL_BDW_GT1_IDS(&bdw_gt1_info),
|
|
|
|
INTEL_BDW_GT2_IDS(&bdw_gt2_info),
|
|
|
|
INTEL_BDW_GT3_IDS(&bdw_gt3_info),
|
|
|
|
INTEL_BDW_RSVD_IDS(&bdw_rsvd_info),
|
|
|
|
INTEL_CHV_IDS(&chv_info),
|
|
|
|
INTEL_SKL_GT1_IDS(&skl_gt1_info),
|
|
|
|
INTEL_SKL_GT2_IDS(&skl_gt2_info),
|
|
|
|
INTEL_SKL_GT3_IDS(&skl_gt3_info),
|
|
|
|
INTEL_SKL_GT4_IDS(&skl_gt4_info),
|
|
|
|
INTEL_BXT_IDS(&bxt_info),
|
|
|
|
INTEL_GLK_IDS(&glk_info),
|
|
|
|
INTEL_KBL_GT1_IDS(&kbl_gt1_info),
|
|
|
|
INTEL_KBL_GT2_IDS(&kbl_gt2_info),
|
|
|
|
INTEL_KBL_GT3_IDS(&kbl_gt3_info),
|
|
|
|
INTEL_KBL_GT4_IDS(&kbl_gt3_info),
|
|
|
|
INTEL_AML_KBL_GT2_IDS(&kbl_gt2_info),
|
|
|
|
INTEL_CFL_S_GT1_IDS(&cfl_gt1_info),
|
|
|
|
INTEL_CFL_S_GT2_IDS(&cfl_gt2_info),
|
|
|
|
INTEL_CFL_H_GT1_IDS(&cfl_gt1_info),
|
|
|
|
INTEL_CFL_H_GT2_IDS(&cfl_gt2_info),
|
|
|
|
INTEL_CFL_U_GT2_IDS(&cfl_gt2_info),
|
|
|
|
INTEL_CFL_U_GT3_IDS(&cfl_gt3_info),
|
|
|
|
INTEL_WHL_U_GT1_IDS(&cfl_gt1_info),
|
|
|
|
INTEL_WHL_U_GT2_IDS(&cfl_gt2_info),
|
|
|
|
INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info),
|
|
|
|
INTEL_WHL_U_GT3_IDS(&cfl_gt3_info),
|
|
|
|
INTEL_CML_GT1_IDS(&cfl_gt1_info),
|
|
|
|
INTEL_CML_GT2_IDS(&cfl_gt2_info),
|
|
|
|
INTEL_CML_U_GT1_IDS(&cfl_gt1_info),
|
|
|
|
INTEL_CML_U_GT2_IDS(&cfl_gt2_info),
|
|
|
|
INTEL_CNL_IDS(&cnl_info),
|
|
|
|
INTEL_ICL_11_IDS(&icl_info),
|
|
|
|
INTEL_EHL_IDS(&ehl_info),
|
|
|
|
INTEL_TGL_12_IDS(&tgl_info),
|
2016-06-24 21:00:26 +08:00
|
|
|
{0, 0, 0}
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(pci, pciidlist);
|
|
|
|
|
2017-02-14 01:15:12 +08:00
|
|
|
static void i915_pci_remove(struct pci_dev *pdev)
|
|
|
|
{
|
2019-08-06 15:42:19 +08:00
|
|
|
struct drm_i915_private *i915;
|
2018-07-16 16:03:31 +08:00
|
|
|
|
2019-08-06 15:42:19 +08:00
|
|
|
i915 = pci_get_drvdata(pdev);
|
|
|
|
if (!i915) /* driver load aborted, nothing to cleanup */
|
2018-07-16 16:03:31 +08:00
|
|
|
return;
|
2017-02-14 01:15:12 +08:00
|
|
|
|
2019-08-06 15:42:19 +08:00
|
|
|
i915_driver_remove(i915);
|
2018-07-16 16:03:31 +08:00
|
|
|
pci_set_drvdata(pdev, NULL);
|
2019-08-06 15:42:19 +08:00
|
|
|
|
|
|
|
drm_dev_put(&i915->drm);
|
2017-02-14 01:15:12 +08:00
|
|
|
}
|
|
|
|
|
2019-05-06 21:48:01 +08:00
|
|
|
/* Is device_id present in the comma-separated list of ids? */
|
|
|
|
static bool force_probe(u16 device_id, const char *devices)
|
|
|
|
{
|
|
|
|
char *s, *p, *tok;
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
if (!devices || !*devices)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* match everything */
|
|
|
|
if (strcmp(devices, "*") == 0)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
s = kstrdup(devices, GFP_KERNEL);
|
|
|
|
if (!s)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
|
|
|
|
u16 val;
|
|
|
|
|
|
|
|
if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
|
|
|
|
ret = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(s);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
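
/*
 * Usage sketch for force_probe() (illustrative only; the device ids
 * below are placeholders, not real hardware ids, and this helper is
 * not part of the driver). The second argument is the raw
 * i915.force_probe= module parameter: "*" matches every device,
 * otherwise a comma-separated list of hex PCI device ids is expected.
 */
static void __maybe_unused force_probe_usage_example(void)
{
	bool any  = force_probe(0x1234, "*");         /* true: wildcard */
	bool hit  = force_probe(0x1234, "abcd,1234"); /* true: 0x1234 is listed */
	bool miss = force_probe(0x1234, "abcd,ef01"); /* false: not listed */

	(void)any;
	(void)hit;
	(void)miss;
}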
|
|
|
|
|
2016-06-24 21:00:26 +08:00
|
|
|
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
|
|
{
|
|
|
|
struct intel_device_info *intel_info =
|
|
|
|
(struct intel_device_info *) ent->driver_data;
|
2017-02-14 01:15:12 +08:00
|
|
|
int err;
|
2016-06-24 21:00:26 +08:00
|
|
|
|
2019-05-06 21:48:01 +08:00
|
|
|
if (intel_info->require_force_probe &&
|
|
|
|
!force_probe(pdev->device, i915_modparams.force_probe)) {
|
2020-02-07 21:50:48 +08:00
|
|
|
dev_info(&pdev->dev,
|
2020-01-31 17:34:16 +08:00
|
|
|
"Your graphics device %04x is not properly supported by the driver in this\n"
|
2019-05-06 21:48:01 +08:00
|
|
|
"kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
|
|
|
|
"module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
|
|
|
|
"or (recommended) check for kernel updates.\n",
|
|
|
|
pdev->device, pdev->device, pdev->device);
|
2016-06-24 21:00:26 +08:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Only bind to function 0 of the device. Early generations
|
|
|
|
 * used function 1 as a placeholder for multi-head. Today this
|
|
|
|
 * only causes us confusion, especially on systems where both
|
|
|
|
 * functions have the same PCI ID!
|
|
|
|
*/
|
|
|
|
if (PCI_FUNC(pdev->devfn))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* apple-gmux is needed on dual GPU MacBook Pro
|
|
|
|
* to probe the panel if we're the inactive GPU.
|
|
|
|
*/
|
|
|
|
if (vga_switcheroo_client_probe_defer(pdev))
|
|
|
|
return -EPROBE_DEFER;
|
|
|
|
|
2019-07-12 19:24:26 +08:00
|
|
|
err = i915_driver_probe(pdev, ent);
|
2017-02-14 01:15:12 +08:00
|
|
|
if (err)
|
|
|
|
return err;
|
2016-06-24 21:00:26 +08:00
|
|
|
|
2019-08-06 15:42:19 +08:00
|
|
|
if (i915_inject_probe_failure(pci_get_drvdata(pdev))) {
|
2018-07-16 16:03:31 +08:00
|
|
|
i915_pci_remove(pdev);
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2017-02-14 01:15:12 +08:00
|
|
|
err = i915_live_selftests(pdev);
|
|
|
|
if (err) {
|
|
|
|
i915_pci_remove(pdev);
|
|
|
|
return err > 0 ? -ENOTTY : err;
|
|
|
|
}
|
2016-06-24 21:00:26 +08:00
|
|
|
|
2019-11-12 01:27:16 +08:00
|
|
|
err = i915_perf_selftests(pdev);
|
|
|
|
if (err) {
|
|
|
|
i915_pci_remove(pdev);
|
|
|
|
return err > 0 ? -ENOTTY : err;
|
|
|
|
}
|
|
|
|
|
2017-02-14 01:15:12 +08:00
|
|
|
return 0;
|
2016-06-24 21:00:26 +08:00
|
|
|
}
|
|
|
|
|
2016-06-24 21:00:27 +08:00
|
|
|
static struct pci_driver i915_pci_driver = {
|
2016-06-24 21:00:26 +08:00
|
|
|
.name = DRIVER_NAME,
|
|
|
|
.id_table = pciidlist,
|
|
|
|
.probe = i915_pci_probe,
|
|
|
|
.remove = i915_pci_remove,
|
|
|
|
.driver.pm = &i915_pm_ops,
|
|
|
|
};
|
2016-06-24 21:00:27 +08:00
|
|
|
|
|
|
|
static int __init i915_init(void)
|
|
|
|
{
|
|
|
|
bool use_kms = true;
|
2017-02-14 01:15:12 +08:00
|
|
|
int err;
|
|
|
|
|
2019-02-28 18:20:33 +08:00
|
|
|
err = i915_globals_init();
|
|
|
|
if (err)
|
|
|
|
return err;
|
2019-02-05 21:00:04 +08:00
|
|
|
|
2017-02-14 01:15:12 +08:00
|
|
|
err = i915_mock_selftests();
|
|
|
|
if (err)
|
|
|
|
return err > 0 ? 0 : err;
|
2016-06-24 21:00:27 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
 * Enable KMS by default, unless explicitly overridden by
|
|
|
|
 * either the i915.modeset parameter or by the
|
|
|
|
* vga_text_mode_force boot option.
|
|
|
|
*/
|
|
|
|
|
2017-09-20 03:38:44 +08:00
|
|
|
if (i915_modparams.modeset == 0)
|
2016-06-24 21:00:27 +08:00
|
|
|
use_kms = false;
|
|
|
|
|
2017-09-20 03:38:44 +08:00
|
|
|
if (vgacon_text_force() && i915_modparams.modeset == -1)
|
2016-06-24 21:00:27 +08:00
|
|
|
use_kms = false;
|
|
|
|
|
|
|
|
if (!use_kms) {
|
|
|
|
/* Silently fail loading to not upset userspace. */
|
|
|
|
DRM_DEBUG_DRIVER("KMS disabled.\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-12-13 23:51:51 +08:00
|
|
|
err = pci_register_driver(&i915_pci_driver);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
i915_perf_sysctl_register();
|
|
|
|
return 0;
|
2016-06-24 21:00:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit i915_exit(void)
|
|
|
|
{
|
|
|
|
if (!i915_pci_driver.driver.owner)
|
|
|
|
return;
|
|
|
|
|
2019-12-13 23:51:51 +08:00
|
|
|
i915_perf_sysctl_unregister();
|
2016-06-24 21:00:27 +08:00
|
|
|
pci_unregister_driver(&i915_pci_driver);
|
2019-02-28 18:20:33 +08:00
|
|
|
i915_globals_exit();
|
2016-06-24 21:00:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
module_init(i915_init);
|
|
|
|
module_exit(i915_exit);
|
|
|
|
|
|
|
|
MODULE_AUTHOR("Tungsten Graphics, Inc.");
|
|
|
|
MODULE_AUTHOR("Intel Corporation");
|
|
|
|
|
|
|
|
MODULE_DESCRIPTION(DRIVER_DESC);
|
|
|
|
MODULE_LICENSE("GPL and additional rights");
|