Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main drm merge window pull request, changes all over the
  place, mostly normal levels of churn.

  Highlights:

  Core drm:
     More cleanups, fix race on connector/encoder naming, docs updates,
     object locking rework in prep for atomic modeset

  i915:
     mipi DSI support, valleyview power fixes, cursor size fixes,
     execlist refactoring, vblank improvements, userptr support, OOM
     handling improvements

  radeon:
     GPUVM tuning and large page size support, gart fixes, deep color
     HDMI support, HDMI audio cleanups

  nouveau:
     - displayport rework should fix lots of issues
     - initial gk20a support
     - gk110b support
     - gk208 fixes

  exynos:
     probe order fixes, HDMI changes, IPP consolidation

  msm:
     debugfs updates, misc fixes

  ast:
     ast2400 support, sync with UMS driver

  tegra:
     cleanups, hdmi + hw cursor for Tegra 124.

  panel:
      fixes for existing panels, plus some new panels added.

  ipuv3:
     moved from staging to drivers/gpu"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (761 commits)
  drm/nouveau/disp/dp: fix tmds passthrough on dp connector
  drm/nouveau/dp: probe dpcd to determine connectedness
  drm/nv50-: trigger update after all connectors disabled
  drm/nv50-: prepare for attaching a SOR to multiple heads
  drm/gf119-/disp: fix debug output on update failure
  drm/nouveau/disp/dp: make use of postcursor when its available
  drm/g94-/disp/dp: take max pullup value across all lanes
  drm/nouveau/bios/dp: parse lane postcursor data
  drm/nouveau/dp: fix support for dpms
  drm/nouveau: register a drm_dp_aux channel for each dp connector
  drm/g94-/disp: add method to power-off dp lanes
  drm/nouveau/disp/dp: maintain link in response to hpd signal
  drm/g94-/disp: bash and wait for something after changing lane power regs
  drm/nouveau/disp/dp: split link config/power into two steps
  drm/nv50/disp: train PIOR-attached DP from second supervisor
  drm/nouveau/disp/dp: make use of existing output data for link training
  drm/gf119/disp: start removing direct vbios parsing from supervisor
  drm/nv50/disp: start removing direct vbios parsing from supervisor
  drm/nouveau/disp/dp: maintain receiver caps in response to hpd signal
  drm/nouveau/disp/dp: create subclass for dp outputs
  ...
Linus Torvalds 2014-06-12 11:32:30 -07:00
commit 682b7c1c8e
447 changed files with 25099 additions and 9854 deletions

(File diff suppressed because it is too large.)

@ -36,7 +36,7 @@
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux XGA"
#define ESTABLISHED_TIMINGS_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */
#define ESTABLISHED_TIMING2_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */
#define HSYNC_POL 0
#define VSYNC_POL 0
#define CRC 0x55


@ -36,7 +36,7 @@
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux SXGA"
#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0xa0


@ -36,7 +36,7 @@
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux UXGA"
#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0x9d


@ -36,7 +36,7 @@
#define DPI 96
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux WSXGA"
#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0x26


@ -36,7 +36,7 @@
#define DPI 96
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux FHD"
#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0x05


@ -0,0 +1,41 @@
/*
800x600.S: EDID data set for standard 800x600 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
Copyright (C) 2014 Linaro Limited
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 40000 /* kHz */
#define XPIX 800
#define YPIX 600
#define XY_RATIO XY_RATIO_4_3
#define XBLANK 256
#define YBLANK 28
#define XOFFSET 40
#define XPULSE 128
#define YOFFSET (63+1)
#define YPULSE (63+4)
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux SVGA"
#define ESTABLISHED_TIMING1_BITS 0x01 /* Bit 0: 800x600 @ 60Hz */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0xc2
#include "edid.S"


@ -18,7 +18,7 @@ CONFIG_DRM_LOAD_EDID_FIRMWARE was introduced. It allows to provide an
individually prepared or corrected EDID data set in the /lib/firmware
directory from where it is loaded via the firmware interface. The code
(see drivers/gpu/drm/drm_edid_load.c) contains built-in data sets for
commonly used screen resolutions (1024x768, 1280x1024, 1600x1200,
commonly used screen resolutions (800x600, 1024x768, 1280x1024, 1600x1200,
1680x1050, 1920x1080) as binary blobs, but the kernel source tree does
not contain code to create these data. In order to elucidate the origin
of the built-in binary EDID blobs and to facilitate the creation of


@ -33,6 +33,17 @@
#define XY_RATIO_5_4 0b10
#define XY_RATIO_16_9 0b11
/* Provide defaults for the timing bits */
#ifndef ESTABLISHED_TIMING1_BITS
#define ESTABLISHED_TIMING1_BITS 0x00
#endif
#ifndef ESTABLISHED_TIMING2_BITS
#define ESTABLISHED_TIMING2_BITS 0x00
#endif
#ifndef ESTABLISHED_TIMING3_BITS
#define ESTABLISHED_TIMING3_BITS 0x00
#endif
#define mfgname2id(v1,v2,v3) \
((((v1-'@')&0x1f)<<10)+(((v2-'@')&0x1f)<<5)+((v3-'@')&0x1f))
#define swap16(v1) ((v1>>8)+((v1&0xff)<<8))
@ -139,7 +150,7 @@ white_x_y_msb: .byte 0x50,0x54
Bit 2 640x480 @ 75 Hz
Bit 1 800x600 @ 56 Hz
Bit 0 800x600 @ 60 Hz */
estbl_timing1: .byte 0x00
estbl_timing1: .byte ESTABLISHED_TIMING1_BITS
/* Bit 7 800x600 @ 72 Hz
Bit 6 800x600 @ 75 Hz
@ -149,11 +160,11 @@ estbl_timing1: .byte 0x00
Bit 2 1024x768 @ 72 Hz
Bit 1 1024x768 @ 75 Hz
Bit 0 1280x1024 @ 75 Hz */
estbl_timing2: .byte ESTABLISHED_TIMINGS_BITS
estbl_timing2: .byte ESTABLISHED_TIMING2_BITS
/* Bit 7 1152x870 @ 75 Hz (Apple Macintosh II)
Bits 6-0 Other manufacturer-specific display mod */
estbl_timing3: .byte 0x00
estbl_timing3: .byte ESTABLISHED_TIMING3_BITS
/* Standard timing */
/* X resolution, less 31, divided by 8 (256-2288 pixels) */


@ -136,6 +136,7 @@ of the following host1x client modules:
- compatible: "nvidia,tegra<chip>-hdmi"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- hdmi-supply: supply for the +5V HDMI connector pin
- vdd-supply: regulator for supply voltage
- pll-supply: regulator for PLL
- clocks: Must contain an entry for each entry in clock-names.
@ -180,6 +181,7 @@ of the following host1x client modules:
See ../reset/reset.txt for details.
- reset-names: Must include the following entries:
- dsi
- avdd-dsi-supply: phandle of a supply that powers the DSI controller
- nvidia,mipi-calibrate: Should contain a phandle and a specifier specifying
which pads are used by this DSI output and need to be calibrated. See also
../mipi/nvidia,tegra114-mipi.txt.


@ -0,0 +1,7 @@
AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
Required properties:
- compatible: should be "auo,b133xtn01"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,7 @@
Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
Required properties:
- compatible: should be "edt,et057090dhu"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,10 @@
Emerging Display Technology Corp. ET070080DH6 7.0" WVGA TFT LCD panel
Required properties:
- compatible: should be "edt,et070080dh6"
This panel is the same as ETM0700G0DH6 except for the touchscreen.
ET070080DH6 is the model with resistive touch.
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,10 @@
Emerging Display Technology Corp. ETM0700G0DH6 7.0" WVGA TFT LCD panel
Required properties:
- compatible: should be "edt,etm0700g0dh6"
This panel is the same as ET070080DH6 except for the touchscreen.
ETM0700G0DH6 is the model with capacitive multitouch.
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -62,6 +62,10 @@ Optional properties for dp-controller:
-hsync-active-high:
HSYNC polarity configuration.
High if defined, Low if not defined
-samsung,hpd-gpio:
Hotplug detect GPIO.
Indicates which GPIO should be used for hotplug
detection
Example:


@ -5,6 +5,7 @@ Required properties:
1) "samsung,exynos5-hdmi" <DEPRECATED>
2) "samsung,exynos4210-hdmi"
3) "samsung,exynos4212-hdmi"
4) "samsung,exynos5420-hdmi"
- reg: physical base address of the hdmi and length of memory mapped
region.
- interrupts: interrupt number to the cpu.
@ -27,6 +28,7 @@ Required properties:
"hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy" and "mout_hdmi".
- ddc: phandle to the hdmi ddc node
- phy: phandle to the hdmi phy node
- samsung,syscon-phandle: phandle for system controller node for PMU.
Example:
@ -37,4 +39,5 @@ Example:
hpd-gpio = <&gpx3 7 1>;
ddc = <&hdmi_ddc_node>;
phy = <&hdmi_phy_node>;
samsung,syscon-phandle = <&pmu_system_controller>;
};


@ -2952,6 +2952,7 @@ L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~airlied/linux
S: Maintained
F: drivers/gpu/drm/
F: drivers/gpu/vga/
F: include/drm/
F: include/uapi/drm/


@ -419,7 +419,7 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
return gmch_ctrl << 25; /* 32 MB units */
}
static size_t gen8_stolen_size(int num, int slot, int func)
static size_t __init gen8_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
@ -429,48 +429,73 @@ static size_t gen8_stolen_size(int num, int slot, int func)
return gmch_ctrl << 25; /* 32 MB units */
}
static size_t __init chv_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GMS_MASK;
/*
* 0x0 to 0x10: 32MB increments starting at 0MB
* 0x11 to 0x16: 4MB increments starting at 8MB
* 0x17 to 0x1d: 4MB increments start at 36MB
*/
if (gmch_ctrl < 0x11)
return gmch_ctrl << 25;
else if (gmch_ctrl < 0x17)
return (gmch_ctrl - 0x11 + 2) << 22;
else
return (gmch_ctrl - 0x17 + 9) << 22;
}
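As a quick sanity check of the decode above: gmch_ctrl values up to 0x10 are plain 32 MB units (0x10 << 25 = 512 MB), 0x11 maps to (0x11 - 0x11 + 2) << 22 = 8 MB, and 0x17 maps to (0x17 - 0x17 + 9) << 22 = 36 MB, matching the comment.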
struct intel_stolen_funcs {
size_t (*size)(int num, int slot, int func);
u32 (*base)(int num, int slot, int func, size_t size);
};
static const struct intel_stolen_funcs i830_stolen_funcs = {
static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
.base = i830_stolen_base,
.size = i830_stolen_size,
};
static const struct intel_stolen_funcs i845_stolen_funcs = {
static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
.base = i845_stolen_base,
.size = i830_stolen_size,
};
static const struct intel_stolen_funcs i85x_stolen_funcs = {
static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
.base = i85x_stolen_base,
.size = gen3_stolen_size,
};
static const struct intel_stolen_funcs i865_stolen_funcs = {
static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
.base = i865_stolen_base,
.size = gen3_stolen_size,
};
static const struct intel_stolen_funcs gen3_stolen_funcs = {
static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = gen3_stolen_size,
};
static const struct intel_stolen_funcs gen6_stolen_funcs = {
static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = gen6_stolen_size,
};
static const struct intel_stolen_funcs gen8_stolen_funcs = {
static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = gen8_stolen_size,
};
static struct pci_device_id intel_stolen_ids[] __initdata = {
static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = chv_stolen_size,
};
static const struct pci_device_id intel_stolen_ids[] __initconst = {
INTEL_I830_IDS(&i830_stolen_funcs),
INTEL_I845G_IDS(&i845_stolen_funcs),
INTEL_I85X_IDS(&i85x_stolen_funcs),
@ -496,7 +521,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
INTEL_HSW_D_IDS(&gen6_stolen_funcs),
INTEL_HSW_M_IDS(&gen6_stolen_funcs),
INTEL_BDW_M_IDS(&gen8_stolen_funcs),
INTEL_BDW_D_IDS(&gen8_stolen_funcs)
INTEL_BDW_D_IDS(&gen8_stolen_funcs),
INTEL_CHV_IDS(&chv_stolen_funcs),
};
static void __init intel_graphics_stolen(int num, int slot, int func)


@ -1,2 +1,3 @@
obj-y += drm/ vga/
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/


@ -83,6 +83,8 @@ config DRM_KMS_CMA_HELPER
source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/bridge/Kconfig"
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@ -199,5 +201,3 @@ source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
source "drivers/gpu/drm/bridge/Kconfig"


@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_plane_helper.o
drm_modeset_lock.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@ -23,7 +23,8 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o


@ -173,7 +173,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_kms;
ret = drm_irq_install(dev);
ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
if (ret)
goto err_kms;
@ -402,7 +402,7 @@ static struct platform_driver armada_drm_platform_driver = {
static int __init armada_drm_init(void)
{
armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
return platform_driver_register(&armada_drm_platform_driver);
}
module_init(armada_drm_init);


@ -181,10 +181,8 @@ void armada_fbdev_lastclose(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
drm_modeset_lock_all(dev);
if (priv->fbdev)
drm_fb_helper_restore_fbdev_mode(priv->fbdev);
drm_modeset_unlock_all(dev);
drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}
void armada_fbdev_fini(struct drm_device *dev)


@ -433,7 +433,6 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
if (dobj->obj.filp) {
struct address_space *mapping;
gfp_t gfp;
int count;
count = dobj->obj.size / PAGE_SIZE;
@ -441,12 +440,11 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
goto free_sgt;
mapping = file_inode(dobj->obj.filp)->i_mapping;
gfp = mapping_gfp_mask(mapping);
for_each_sg(sgt->sgl, sg, count, i) {
struct page *page;
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
num = i;
goto release;


@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o ast_dp501.o
obj-$(CONFIG_DRM_AST) := ast.o
obj-$(CONFIG_DRM_AST) := ast.o


@ -0,0 +1,410 @@
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "ast_drv.h"
MODULE_FIRMWARE("ast_dp501_fw.bin");
int ast_load_dp501_microcode(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
static char *fw_name = "ast_dp501_fw.bin";
int err;
err = request_firmware(&ast->dp501_fw, fw_name, dev->dev);
if (err)
return err;
return 0;
}
static void send_ack(struct ast_private *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
sendack |= 0x80;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
static void send_nack(struct ast_private *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
sendack &= ~0x80;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
static bool wait_ack(struct ast_private *ast)
{
u8 waitack;
u32 retry = 0;
do {
waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitack &= 0x80;
udelay(100);
} while ((!waitack) && (retry++ < 1000));
if (retry < 1000)
return true;
else
return false;
}
static bool wait_nack(struct ast_private *ast)
{
u8 waitack;
u32 retry = 0;
do {
waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitack &= 0x80;
udelay(100);
} while ((waitack) && (retry++ < 1000));
if (retry < 1000)
return true;
else
return false;
}
static void set_cmd_trigger(struct ast_private *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
}
static void clear_cmd_trigger(struct ast_private *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
}
#if 0
static bool wait_fw_ready(struct ast_private *ast)
{
u8 waitready;
u32 retry = 0;
do {
waitready = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, 0xff);
waitready &= 0x40;
udelay(100);
} while ((!waitready) && (retry++ < 1000));
if (retry < 1000)
return true;
else
return false;
}
#endif
static bool ast_write_cmd(struct drm_device *dev, u8 data)
{
struct ast_private *ast = dev->dev_private;
int retry = 0;
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
send_ack(ast);
set_cmd_trigger(ast);
do {
if (wait_ack(ast)) {
clear_cmd_trigger(ast);
send_nack(ast);
return true;
}
} while (retry++ < 100);
}
clear_cmd_trigger(ast);
send_nack(ast);
return false;
}
static bool ast_write_data(struct drm_device *dev, u8 data)
{
struct ast_private *ast = dev->dev_private;
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data);
send_ack(ast);
if (wait_ack(ast)) {
send_nack(ast);
return true;
}
}
send_nack(ast);
return false;
}
#if 0
static bool ast_read_data(struct drm_device *dev, u8 *data)
{
struct ast_private *ast = dev->dev_private;
u8 tmp;
*data = 0;
if (wait_ack(ast) == false)
return false;
tmp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd3, 0xff);
*data = tmp;
if (wait_nack(ast) == false) {
send_nack(ast);
return false;
}
send_nack(ast);
return true;
}
static void clear_cmd(struct ast_private *ast)
{
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
}
#endif
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
{
ast_write_cmd(dev, 0x40);
ast_write_data(dev, mode);
msleep(10);
}
static u32 get_fw_base(struct ast_private *ast)
{
return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
}
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
{
struct ast_private *ast = dev->dev_private;
u32 i, data;
u32 boot_address;
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (data) {
boot_address = get_fw_base(ast);
for (i = 0; i < size; i += 4)
*(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i);
return true;
}
return false;
}
bool ast_launch_m68k(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u32 i, data, len = 0;
u32 boot_address;
u8 *fw_addr = NULL;
u8 jreg;
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (!data) {
if (ast->dp501_fw_addr) {
fw_addr = ast->dp501_fw_addr;
len = 32*1024;
} else if (ast->dp501_fw) {
fw_addr = (u8 *)ast->dp501_fw->data;
len = ast->dp501_fw->size;
}
/* Get BootAddress */
ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
data = ast_mindwm(ast, 0x1e6e0004);
switch (data & 0x03) {
case 0:
boot_address = 0x44000000;
break;
default:
case 1:
boot_address = 0x48000000;
break;
case 2:
boot_address = 0x50000000;
break;
case 3:
boot_address = 0x60000000;
break;
}
boot_address -= 0x200000; /* -2MB */
/* copy image to buffer */
for (i = 0; i < len; i += 4) {
data = *(u32 *)(fw_addr + i);
ast_moutdwm(ast, boot_address + i, data);
}
/* Init SCU */
ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8);
/* Launch FW */
ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address);
ast_moutdwm(ast, 0x1e6e2100, 1);
/* Update Scratch */
data = ast_mindwm(ast, 0x1e6e2040) & 0xfffff1ff; /* D[11:9] = 100b: UEFI handling */
data |= 0x800;
ast_moutdwm(ast, 0x1e6e2040, data);
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xfc); /* D[1:0]: Reserved Video Buffer */
jreg |= 0x02;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg);
}
return true;
}
u8 ast_get_dp501_max_clk(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u32 boot_address, offset, data;
u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
boot_address = get_fw_base(ast);
/* validate FW version */
offset = 0xf000;
data = ast_mindwm(ast, boot_address + offset);
if ((data & 0xf0) != 0x10) /* version: 1x */
return maxclk;
/* Read Link Capability */
offset = 0xf014;
*(u32 *)linkcap = ast_mindwm(ast, boot_address + offset);
if (linkcap[2] == 0) {
linkrate = linkcap[0];
linklanes = linkcap[1];
data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
if (data > 0xff)
data = 0xff;
maxclk = (u8)data;
}
return maxclk;
}
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = dev->dev_private;
u32 i, boot_address, offset, data;
boot_address = get_fw_base(ast);
/* validate FW version */
offset = 0xf000;
data = ast_mindwm(ast, boot_address + offset);
if ((data & 0xf0) != 0x10)
return false;
/* validate PnP Monitor */
offset = 0xf010;
data = ast_mindwm(ast, boot_address + offset);
if (!(data & 0x01))
return false;
/* Read EDID */
offset = 0xf020;
for (i = 0; i < 128; i += 4) {
data = ast_mindwm(ast, boot_address + offset + i);
*(u32 *)(ediddata + i) = data;
}
return true;
}
static bool ast_init_dvo(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u8 jreg;
u32 data;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
ast_write32(ast, 0x12000, 0x1688a8a8);
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if (!(jreg & 0x80)) {
/* Init SCU DVO Settings */
data = ast_read32(ast, 0x12008);
/* delay phase */
data &= 0xfffff8ff;
data |= 0x00000500;
ast_write32(ast, 0x12008, data);
if (ast->chip == AST2300) {
data = ast_read32(ast, 0x12084);
/* multi-pins for DVO single-edge */
data |= 0xfffe0000;
ast_write32(ast, 0x12084, data);
data = ast_read32(ast, 0x12088);
/* multi-pins for DVO single-edge */
data |= 0x000fffff;
ast_write32(ast, 0x12088, data);
data = ast_read32(ast, 0x12090);
/* multi-pins for DVO single-edge */
data &= 0xffffffcf;
data |= 0x00000020;
ast_write32(ast, 0x12090, data);
} else { /* AST2400 */
data = ast_read32(ast, 0x12088);
/* multi-pins for DVO single-edge */
data |= 0x30000000;
ast_write32(ast, 0x12088, data);
data = ast_read32(ast, 0x1208c);
/* multi-pins for DVO single-edge */
data |= 0x000000cf;
ast_write32(ast, 0x1208c, data);
data = ast_read32(ast, 0x120a4);
/* multi-pins for DVO single-edge */
data |= 0xffff0000;
ast_write32(ast, 0x120a4, data);
data = ast_read32(ast, 0x120a8);
/* multi-pins for DVO single-edge */
data |= 0x0000000f;
ast_write32(ast, 0x120a8, data);
data = ast_read32(ast, 0x12094);
/* multi-pins for DVO single-edge */
data |= 0x00000002;
ast_write32(ast, 0x12094, data);
}
}
/* Force to DVO */
data = ast_read32(ast, 0x1202c);
data &= 0xfffbffff;
ast_write32(ast, 0x1202c, data);
/* Init VGA DVO Settings */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);
return true;
}
void ast_init_3rdtx(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u8 jreg;
u32 data;
if (ast->chip == AST2300 || ast->chip == AST2400) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg & 0x0e) {
case 0x04:
ast_init_dvo(dev);
break;
case 0x08:
ast_launch_m68k(dev);
break;
case 0x0c:
ast_init_dvo(dev);
break;
default:
if (ast->tx_chip_type == AST_TX_SIL164)
ast_init_dvo(dev);
else {
ast_write32(ast, 0x12000, 0x1688a8a8);
data = ast_read32(ast, 0x1202c);
data &= 0xfffcffff;
ast_write32(ast, 0, data);
}
}
}
}


@ -94,9 +94,7 @@ static int ast_drm_thaw(struct drm_device *dev)
ast_post_gpu(dev);
drm_mode_config_reset(dev);
drm_modeset_lock_all(dev);
drm_helper_resume_force_mode(dev);
drm_modeset_unlock_all(dev);
console_lock();
ast_fbdev_set_suspend(dev, 0);
@ -198,7 +196,6 @@ static const struct file_operations ast_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.dev_priv_size = 0,
.load = ast_driver_load,
.unload = ast_driver_unload,


@ -61,9 +61,17 @@ enum ast_chip {
AST2200,
AST2150,
AST2300,
AST2400,
AST1180,
};
enum ast_tx_chip {
AST_TX_NONE,
AST_TX_SIL164,
AST_TX_ITE66121,
AST_TX_DP501,
};
#define AST_DRAM_512Mx16 0
#define AST_DRAM_1Gx16 1
#define AST_DRAM_512Mx32 2
@ -102,6 +110,12 @@ struct ast_private {
* we have. */
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
};
int ast_driver_load(struct drm_device *dev, unsigned long flags);
@ -368,4 +382,14 @@ int ast_mmap(struct file *filp, struct vm_area_struct *vma);
/* ast post */
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_private *ast, u32 r);
void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
/* ast dp501 */
int ast_load_dp501_microcode(struct drm_device *dev);
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_launch_m68k(struct drm_device *dev);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
#endif


@ -66,12 +66,16 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static int ast_detect_chip(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
uint32_t data, jreg;
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
DRM_INFO("AST 1180 detected\n");
} else {
if (dev->pdev->revision >= 0x20) {
if (dev->pdev->revision >= 0x30) {
ast->chip = AST2400;
DRM_INFO("AST 2400 detected\n");
} else if (dev->pdev->revision >= 0x20) {
ast->chip = AST2300;
DRM_INFO("AST 2300 detected\n");
} else if (dev->pdev->revision >= 0x10) {
@ -104,6 +108,59 @@ static int ast_detect_chip(struct drm_device *dev)
DRM_INFO("AST 2000 detected\n");
}
}
switch (ast->chip) {
case AST1180:
ast->support_wide_screen = true;
break;
case AST2000:
ast->support_wide_screen = false;
break;
default:
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if (!(jreg & 0x80))
ast->support_wide_screen = true;
else if (jreg & 0x01)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x1207c);
data &= 0x300;
if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
ast->support_wide_screen = true;
if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
ast->support_wide_screen = true;
}
break;
}
ast->tx_chip_type = AST_TX_NONE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
if (jreg & 0x80)
ast->tx_chip_type = AST_TX_SIL164;
if ((ast->chip == AST2300) || (ast->chip == AST2400)) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg) {
case 0x04:
ast->tx_chip_type = AST_TX_SIL164;
break;
case 0x08:
ast->dp501_fw_addr = kzalloc(32*1024, GFP_KERNEL);
if (ast->dp501_fw_addr) {
/* backup firmware */
if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) {
kfree(ast->dp501_fw_addr);
ast->dp501_fw_addr = NULL;
}
}
/* fallthrough */
case 0x0c:
ast->tx_chip_type = AST_TX_DP501;
}
}
return 0;
}
@ -129,7 +186,7 @@ static int ast_get_dram_info(struct drm_device *dev)
else
ast->dram_bus_width = 32;
if (ast->chip == AST2300) {
if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (data & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
@ -257,17 +314,32 @@ static u32 ast_get_vram_info(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u8 jreg;
u32 vram_size;
ast_open_key(ast);
vram_size = AST_VIDMEM_DEFAULT_SIZE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
switch (jreg & 3) {
case 0: return AST_VIDMEM_SIZE_8M;
case 1: return AST_VIDMEM_SIZE_16M;
case 2: return AST_VIDMEM_SIZE_32M;
case 3: return AST_VIDMEM_SIZE_64M;
case 0: vram_size = AST_VIDMEM_SIZE_8M; break;
case 1: vram_size = AST_VIDMEM_SIZE_16M; break;
case 2: vram_size = AST_VIDMEM_SIZE_32M; break;
case 3: vram_size = AST_VIDMEM_SIZE_64M; break;
}
return AST_VIDMEM_DEFAULT_SIZE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff);
switch (jreg & 0x03) {
case 1:
vram_size -= 0x100000;
break;
case 2:
vram_size -= 0x200000;
break;
case 3:
vram_size -= 0x400000;
break;
}
return vram_size;
}
int ast_driver_load(struct drm_device *dev, unsigned long flags)
@ -316,6 +388,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
if (ast->chip == AST2100 ||
ast->chip == AST2200 ||
ast->chip == AST2300 ||
ast->chip == AST2400 ||
ast->chip == AST1180) {
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 2048;
@ -343,6 +416,7 @@ int ast_driver_unload(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
ast_fbdev_fini(dev);
drm_mode_config_cleanup(dev);
@ -411,16 +485,13 @@ static void ast_bo_unref(struct ast_bo **bo)
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
if (tbo == NULL)
*bo = NULL;
*bo = NULL;
}
void ast_gem_free_object(struct drm_gem_object *obj)
{
struct ast_bo *ast_bo = gem_to_ast_bo(obj);
if (!ast_bo)
return;
ast_bo_unref(&ast_bo);
}


@ -115,11 +115,17 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
else
vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
break;
case 1360:
vbios_mode->enh_table = &res_1360x768[refresh_rate_index];
break;
case 1440:
vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
break;
case 1600:
vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
if (crtc->mode.crtc_vdisplay == 900)
vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
else
vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
break;
case 1680:
vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
@ -175,14 +181,17 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
if (vbios_mode->enh_table->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
}
}
return true;
@ -389,7 +398,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
/* Set Threshold */
if (ast->chip == AST2300) {
if (ast->chip == AST2300 || ast->chip == AST2400) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
} else if (ast->chip == AST2100 ||
@ -451,9 +460,13 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 1);
ast_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_OFF:
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 0);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
break;
}
@ -729,10 +742,24 @@ static int ast_encoder_init(struct drm_device *dev)
static int ast_get_modes(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
struct ast_private *ast = connector->dev->dev_private;
struct edid *edid;
int ret;
bool flags = false;
if (ast->tx_chip_type == AST_TX_DP501) {
ast->dp501_maxclk = 0xff;
edid = kmalloc(128, GFP_KERNEL);
if (!edid)
return -ENOMEM;
edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
flags = ast_dp501_read_edid(connector->dev, (u8 *)edid);
if (flags)
ast->dp501_maxclk = ast_get_dp501_max_clk(connector->dev);
else
kfree(edid);
}
if (!flags)
edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
if (edid) {
drm_mode_connector_update_edid_property(&ast_connector->base, edid);
ret = drm_add_edid_modes(connector, edid);
@ -746,7 +773,56 @@ static int ast_get_modes(struct drm_connector *connector)
static int ast_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
struct ast_private *ast = connector->dev->dev_private;
int flags = MODE_NOMODE;
uint32_t jtemp;
if (ast->support_wide_screen) {
if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
return MODE_OK;
if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
return MODE_OK;
if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
return MODE_OK;
if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
return MODE_OK;
if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
return MODE_OK;
if ((ast->chip == AST2100) || (ast->chip == AST2200) || (ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST1180)) {
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
return MODE_OK;
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
if (jtemp & 0x01)
return MODE_NOMODE;
else
return MODE_OK;
}
}
}
switch (mode->hdisplay) {
case 640:
if (mode->vdisplay == 480) flags = MODE_OK;
break;
case 800:
if (mode->vdisplay == 600) flags = MODE_OK;
break;
case 1024:
if (mode->vdisplay == 768) flags = MODE_OK;
break;
case 1280:
if (mode->vdisplay == 1024) flags = MODE_OK;
break;
case 1600:
if (mode->vdisplay == 1200) flags = MODE_OK;
break;
default:
return flags;
}
return flags;
}
static void ast_connector_destroy(struct drm_connector *connector)

(File diff suppressed because it is too large.)

@ -42,7 +42,7 @@
#define HBorder 0x00000020
#define VBorder 0x00000010
#define WideScreenMode 0x00000100
#define NewModeInfo 0x00000200
/* DCLK Index */
#define VCLK25_175 0x00
@ -67,6 +67,11 @@
#define VCLK106_5 0x12
#define VCLK146_25 0x13
#define VCLK148_5 0x14
#define VCLK71 0x15
#define VCLK88_75 0x16
#define VCLK119 0x17
#define VCLK85_5 0x18
#define VCLK97_75 0x19
static struct ast_vbios_dclk_info dclk_table[] = {
{0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
@ -90,6 +95,10 @@ static struct ast_vbios_dclk_info dclk_table[] = {
{0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
{0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
{0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
{0x47, 0x6c, 0x80}, /* 15: VCLK71 */
{0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
{0x77, 0x58, 0x80}, /* 17: VCLK119 */
{0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
};
static struct ast_vbios_stdtable vbios_stdtable[] = {
@ -225,41 +234,63 @@ static struct ast_vbios_enhtable res_1600x1200[] = {
(SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
};
static struct ast_vbios_enhtable res_1920x1200[] = {
{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
(SyncNP | Charx8Dot), 60, 1, 0x34 },
{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
(SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
/* 16:9 */
static struct ast_vbios_enhtable res_1360x768[] = {
{1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
(SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
{1792, 1360, 64,112, 795, 768, 3, 6, VCLK85_5, /* end */
(SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x39 },
};
static struct ast_vbios_enhtable res_1600x900[] = {
{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A },
{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* end */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x3A }
};
static struct ast_vbios_enhtable res_1920x1080[] = {
{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x38 },
{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x38 },
};
/* 16:10 */
static struct ast_vbios_enhtable res_1280x800[] = {
{1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 35 },
{1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
{1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x35 },
};
static struct ast_vbios_enhtable res_1440x900[] = {
{1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
{1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
{1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x36 },
};
static struct ast_vbios_enhtable res_1680x1050[] = {
{1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x37 },
};
/* HDTV */
static struct ast_vbios_enhtable res_1920x1080[] = {
{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
static struct ast_vbios_enhtable res_1920x1200[] = {
{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 },
{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 },
};
#endif


@ -434,17 +434,13 @@ static void bochs_bo_unref(struct bochs_bo **bo)
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
if (tbo == NULL)
*bo = NULL;
*bo = NULL;
}
void bochs_gem_free_object(struct drm_gem_object *obj)
{
struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
if (!bochs_bo)
return;
bochs_bo_unref(&bochs_bo);
}


@ -225,12 +225,6 @@ out:
return num_modes;
}
static int ptn3460_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge;
@ -242,7 +236,6 @@ struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
.get_modes = ptn3460_get_modes,
.mode_valid = ptn3460_mode_valid,
.best_encoder = ptn3460_best_encoder,
};


@ -264,17 +264,13 @@ static void cirrus_bo_unref(struct cirrus_bo **bo)
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
if (tbo == NULL)
*bo = NULL;
*bo = NULL;
}
void cirrus_gem_free_object(struct drm_gem_object *obj)
{
struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
if (!cirrus_bo)
return;
cirrus_bo_unref(&cirrus_bo);
}


@ -505,13 +505,6 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
return count;
}
static int cirrus_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
/* Any mode we've added is valid */
return MODE_OK;
}
static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
*connector)
{
@ -546,7 +539,6 @@ static void cirrus_connector_destroy(struct drm_connector *connector)
struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
.get_modes = cirrus_vga_get_modes,
.mode_valid = cirrus_vga_mode_valid,
.best_encoder = cirrus_connector_best_encoder,
};


@ -363,7 +363,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
list->master = dev->primary->master;
*maplist = list;
return 0;
}
}
int drm_addmap(struct drm_device * dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}
spin_lock(&dev->count_lock);
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
spin_lock(&dev->count_lock);
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
spin_lock(&dev->count_lock);
spin_lock(&dev->buf_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
* \param arg pointer to a drm_buf_info structure.
* \return zero on success or a negative number on failure.
*
* Increments drm_device::buf_use while holding the drm_device::count_lock
* Increments drm_device::buf_use while holding the drm_device::buf_lock
* lock, preventing of allocating more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
if (!dma)
return -EINVAL;
spin_lock(&dev->count_lock);
spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count)
@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
if (!dma)
return -EINVAL;
spin_lock(&dev->count_lock);
spin_lock(&dev->buf_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
return -EBUSY;
}
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
spin_unlock(&dev->buf_lock);
if (request->count >= dma->buf_count) {
if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))


@ -131,14 +131,14 @@ drm_clflush_sg(struct sg_table *st)
EXPORT_SYMBOL(drm_clflush_sg);
void
drm_clflush_virt_range(char *addr, unsigned long length)
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
char *end = addr + length;
void *end = addr + length;
mb();
for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
clflush(addr);
clflushopt(addr);
clflushopt(end - 1);
mb();
return;


@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
#include "drm_crtc_internal.h"
@ -50,12 +51,42 @@
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
mutex_lock(&dev->mode_config.mutex);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (WARN_ON(!ctx))
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
mutex_lock(&config->mutex);
drm_modeset_acquire_init(ctx, 0);
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_crtcs(dev, ctx);
if (ret)
goto fail;
WARN_ON(config->acquire_ctx);
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_all():
*/
config->acquire_ctx = ctx;
drm_warn_on_modeset_not_all_locked(dev);
return;
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
}
EXPORT_SYMBOL(drm_modeset_lock_all);
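The retry loop above is the general pattern every caller of the new modeset locks follows. A minimal sketch of taking a single CRTC lock with an explicit acquire context, using only the interfaces visible in this diff (the wrapper function itself is hypothetical, not part of the commit):

#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

static int example_lock_one_crtc(struct drm_crtc *crtc,
				 struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	drm_modeset_acquire_init(ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret == -EDEADLK) {
		/* drop contended locks and restart the acquire sequence */
		drm_modeset_backoff(ctx);
		goto retry;
	}
	if (!ret) {
		/* ... modify CRTC state while the lock is held ... */
		drm_modeset_drop_locks(ctx);
	}
	drm_modeset_acquire_fini(ctx);
	return ret;
}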
@ -67,10 +98,17 @@ EXPORT_SYMBOL(drm_modeset_lock_all);
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
mutex_unlock(&crtc->mutex);
if (WARN_ON(!ctx))
return;
config->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
kfree(ctx);
mutex_unlock(&dev->mode_config.mutex);
}
@ -91,8 +129,9 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!mutex_is_locked(&crtc->mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
@ -227,6 +266,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
{ DRM_MODE_ENCODER_DSI, "DSI" },
{ DRM_MODE_ENCODER_DPMST, "DP MST" },
};
static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
@ -255,46 +295,6 @@ void drm_connector_ida_destroy(void)
ida_destroy(&drm_connector_enum_list[i].ida);
}
/**
* drm_get_encoder_name - return a string for encoder
* @encoder: encoder to compute name of
*
* Note that the buffer used by this function is globally shared and owned by
* the function itself.
*
* FIXME: This isn't really multithreading safe.
*/
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
snprintf(buf, 32, "%s-%d",
drm_encoder_enum_list[encoder->encoder_type].name,
encoder->base.id);
return buf;
}
EXPORT_SYMBOL(drm_get_encoder_name);
/**
* drm_get_connector_name - return a string for connector
* @connector: connector to compute name of
*
* Note that the buffer used by this function is globally shared and owned by
* the function itself.
*
* FIXME: This isn't really multithreading safe.
*/
const char *drm_get_connector_name(const struct drm_connector *connector)
{
static char buf[32];
snprintf(buf, 32, "%s-%d",
drm_connector_enum_list[connector->connector_type].name,
connector->connector_type_id);
return buf;
}
EXPORT_SYMBOL(drm_get_connector_name);
/**
* drm_get_connector_status_name - return a string for connector status
* @status: connector status to compute name of
@ -409,6 +409,21 @@ void drm_mode_object_put(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
static struct drm_mode_object *_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
(obj->id != id))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
return obj;
}
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
@ -416,7 +431,9 @@ void drm_mode_object_put(struct drm_device *dev,
* @type: type of the mode object
*
* Note that framebuffers cannot be looked up with this functions - since those
* are reference counted, they need special treatment.
* are reference counted, they need special treatment. Even with
* DRM_MODE_OBJECT_ANY (although that will simply return NULL
* rather than WARN_ON()).
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
@ -426,13 +443,10 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
/* Framebuffers are reference counted and need their own lookup
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != type) || (obj->id != id))
obj = _object_find(dev, id, type);
/* don't leak out unref'd fb's */
if (obj && (obj->type == DRM_MODE_OBJECT_FB))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
@ -538,7 +552,7 @@ EXPORT_SYMBOL(drm_framebuffer_lookup);
*/
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);
@ -551,7 +565,7 @@ EXPORT_SYMBOL(drm_framebuffer_unreference);
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_get(&fb->refcount);
}
EXPORT_SYMBOL(drm_framebuffer_reference);
@ -563,7 +577,7 @@ static void drm_framebuffer_free_bug(struct kref *kref)
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
@ -691,6 +705,8 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
}
EXPORT_SYMBOL(drm_framebuffer_remove);
DEFINE_WW_CLASS(crtc_ww_class);
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@ -710,6 +726,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
void *cursor,
const struct drm_crtc_funcs *funcs)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
crtc->dev = dev;
@ -717,8 +734,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->invert_dimensions = false;
drm_modeset_lock_all(dev);
mutex_init(&crtc->mutex);
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
drm_modeset_lock_init(&crtc->mutex);
/* dropped by _unlock_all(): */
drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
@ -726,8 +744,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
list_add_tail(&crtc->head, &config->crtc_list);
config->num_crtc++;
crtc->primary = primary;
if (primary)
@ -755,6 +773,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
drm_modeset_lock_fini(&crtc->mutex);
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
@ -824,7 +844,7 @@ int drm_connector_init(struct drm_device *dev,
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
if (ret)
goto out;
goto out_unlock;
connector->base.properties = &connector->properties;
connector->dev = dev;
@ -834,9 +854,17 @@ int drm_connector_init(struct drm_device *dev,
ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
drm_mode_object_put(dev, &connector->base);
goto out;
goto out_put;
}
connector->name =
kasprintf(GFP_KERNEL, "%s-%d",
drm_connector_enum_list[connector_type].name,
connector->connector_type_id);
if (!connector->name) {
ret = -ENOMEM;
goto out_put;
}
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
@ -853,7 +881,11 @@ int drm_connector_init(struct drm_device *dev,
drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
out:
out_put:
if (ret)
drm_mode_object_put(dev, &connector->base);
out_unlock:
drm_modeset_unlock_all(dev);
return ret;
@ -881,6 +913,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->connector_type_id);
drm_mode_object_put(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
list_del(&connector->head);
dev->mode_config.num_connector--;
}
@ -982,16 +1016,27 @@ int drm_encoder_init(struct drm_device *dev,
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
goto out;
goto out_unlock;
encoder->dev = dev;
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
drm_encoder_enum_list[encoder_type].name,
encoder->base.id);
if (!encoder->name) {
ret = -ENOMEM;
goto out_put;
}
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
dev->mode_config.num_encoder++;
out:
out_put:
if (ret)
drm_mode_object_put(dev, &encoder->base);
out_unlock:
drm_modeset_unlock_all(dev);
return ret;
@ -1009,6 +1054,8 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
encoder->name = NULL;
list_del(&encoder->head);
dev->mode_config.num_encoder--;
drm_modeset_unlock_all(dev);
@ -1145,16 +1192,19 @@ EXPORT_SYMBOL(drm_plane_cleanup);
*/
void drm_plane_force_disable(struct drm_plane *plane)
{
struct drm_framebuffer *old_fb = plane->fb;
int ret;
if (!plane->fb)
if (!old_fb)
return;
ret = plane->funcs->disable_plane(plane);
if (ret)
if (ret) {
DRM_ERROR("failed to disable plane with busy fb\n");
return;
}
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(plane->fb);
__drm_framebuffer_unreference(old_fb);
plane->fb = NULL;
plane->crtc = NULL;
}
@ -1378,6 +1428,12 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
return 0;
}
void drm_mode_group_destroy(struct drm_mode_group *group)
{
kfree(group->id_list);
group->id_list = NULL;
}
/*
* NOTE: Driver's shouldn't ever call drm_mode_group_init_legacy_group - it is
* the drm core's responsibility to set up mode control groups.
@ -1614,7 +1670,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
&dev->mode_config.encoder_list,
head) {
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
drm_get_encoder_name(encoder));
encoder->name);
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
@ -1646,7 +1702,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
head) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector));
connector->name);
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
@ -1695,7 +1751,6 @@ int drm_mode_getcrtc(struct drm_device *dev,
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
struct drm_mode_object *obj;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@ -1703,13 +1758,11 @@ int drm_mode_getcrtc(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
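For readers tracking the conversions in the hunks above and below: the new drm_crtc_find()/drm_connector_find()/drm_encoder_find()/drm_plane_find() lookups are presumably thin inline wrappers that fold the drm_mode_object_find() call and the obj_to_*() cast together. A sketch of what such a helper would look like (illustrative only, not part of this diff):
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
					     uint32_t id)
{
	struct drm_mode_object *mo;

	/* look up the id restricted to CRTC objects, then cast */
	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
	return mo ? obj_to_crtc(mo) : NULL;
}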
@ -1763,7 +1816,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
struct drm_mode_object *obj;
struct drm_connector *connector;
struct drm_display_mode *mode;
int mode_count = 0;
@ -1787,13 +1839,11 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->connector_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
connector = drm_connector_find(dev, out_resp->connector_id);
if (!connector) {
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
props_count = connector->properties.count;
@ -1821,10 +1871,12 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
if (connector->encoder)
out_resp->encoder_id = connector->encoder->base.id;
else
out_resp->encoder_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
/*
* This ioctl is called twice, once to determine how much space is
@ -1908,7 +1960,6 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
struct drm_mode_object *obj;
struct drm_encoder *encoder;
int ret = 0;
@ -1916,13 +1967,11 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
return -EINVAL;
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
encoder = drm_encoder_find(dev, enc_resp->encoder_id);
if (!encoder) {
ret = -ENOENT;
goto out;
}
encoder = obj_to_encoder(obj);
if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
@ -2020,7 +2069,6 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
uint32_t __user *format_ptr;
int ret = 0;
@ -2029,13 +2077,11 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
return -EINVAL;
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, plane_resp->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
plane = drm_plane_find(dev, plane_resp->plane_id);
if (!plane) {
ret = -ENOENT;
goto out;
}
plane = obj_to_plane(obj);
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
@ -2088,7 +2134,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
@ -2103,35 +2148,42 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
obj = drm_mode_object_find(dev, plane_req->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
plane = drm_plane_find(dev, plane_req->plane_id);
if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
plane = obj_to_plane(obj);
/* No fb means shut it down */
if (!plane_req->fb_id) {
drm_modeset_lock_all(dev);
old_fb = plane->fb;
plane->funcs->disable_plane(plane);
plane->crtc = NULL;
plane->fb = NULL;
ret = plane->funcs->disable_plane(plane);
if (!ret) {
plane->crtc = NULL;
plane->fb = NULL;
} else {
old_fb = NULL;
}
drm_modeset_unlock_all(dev);
goto out;
}
obj = drm_mode_object_find(dev, plane_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, plane_req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_KMS("Invalid crtc for plane\n");
ret = -EINVAL;
goto out;
}
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
if (!fb) {
@ -2187,16 +2239,18 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (!ret) {
old_fb = plane->fb;
plane->crtc = crtc;
plane->fb = fb;
fb = NULL;
} else {
old_fb = NULL;
}
drm_modeset_unlock_all(dev);
@ -2239,9 +2293,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
ret = crtc->funcs->set_config(set);
if (ret == 0) {
crtc->primary->crtc = crtc;
/* crtc->fb must be updated by ->set_config, enforces this. */
WARN_ON(fb != crtc->primary->fb);
crtc->primary->fb = fb;
}
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
@ -2318,7 +2370,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
@ -2336,14 +2387,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
return -ERANGE;
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, crtc_req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
@ -2426,18 +2475,16 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
obj = drm_mode_object_find(dev, out_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
connector = drm_connector_find(dev, out_id);
if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector));
connector->name);
connector_set[i] = connector;
}
@ -2466,7 +2513,6 @@ static int drm_mode_cursor_common(struct drm_device *dev,
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
@ -2476,14 +2522,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -ENOENT;
}
crtc = obj_to_crtc(obj);
mutex_lock(&crtc->mutex);
drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
@ -2507,7 +2552,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
}
}
out:
mutex_unlock(&crtc->mutex);
drm_modeset_unlock(&crtc->mutex);
return ret;
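A note on the mutex_lock(&crtc->mutex) to drm_modeset_lock(&crtc->mutex, NULL) conversions: passing a NULL acquire context takes the per-CRTC lock unconditionally, much like the old mutex, while callers that participate in acquire/backoff pass a struct drm_modeset_acquire_ctx instead. A minimal sketch of the pattern (function name invented):
static void example_touch_crtc(struct drm_crtc *crtc)
{
	/* NULL context: plain locking, as in the cursor and page-flip
	 * ioctls converted in this diff */
	drm_modeset_lock(&crtc->mutex, NULL);

	/* ... access crtc->primary->fb, cursor state, etc. ... */

	drm_modeset_unlock(&crtc->mutex);
}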
@ -3097,6 +3142,8 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
if (!property)
return NULL;
property->dev = dev;
if (num_values) {
property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
if (!property->values)
@ -3117,6 +3164,9 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
}
list_add_tail(&property->head, &dev->mode_config.property_list);
WARN_ON(!drm_property_type_valid(property));
return property;
fail:
kfree(property->values);
@ -3217,6 +3267,22 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_property_create_bitmask);
static struct drm_property *property_create_range(struct drm_device *dev,
int flags, const char *name,
uint64_t min, uint64_t max)
{
struct drm_property *property;
property = drm_property_create(dev, flags, name, 2);
if (!property)
return NULL;
property->values[0] = min;
property->values[1] = max;
return property;
}
/**
* drm_property_create - create a new ranged property type
* @dev: drm device
@ -3238,21 +3304,37 @@ EXPORT_SYMBOL(drm_property_create_bitmask);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
{
return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
name, min, max);
}
EXPORT_SYMBOL(drm_property_create_range);
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
int flags, const char *name,
int64_t min, int64_t max)
{
return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
name, I642U64(min), I642U64(max));
}
EXPORT_SYMBOL(drm_property_create_signed_range);
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type)
{
struct drm_property *property;
flags |= DRM_MODE_PROP_RANGE;
flags |= DRM_MODE_PROP_OBJECT;
property = drm_property_create(dev, flags, name, 2);
property = drm_property_create(dev, flags, name, 1);
if (!property)
return NULL;
property->values[0] = min;
property->values[1] = max;
property->values[0] = type;
return property;
}
EXPORT_SYMBOL(drm_property_create_range);
EXPORT_SYMBOL(drm_property_create_object);
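A hypothetical driver snippet showing the new property constructors in use (the foo_* identifiers and property names are invented for illustration; the signatures come from the hunks above):
struct foo_plane_props {
	struct drm_property *zpos;
	struct drm_property *fb_id;
};

static int foo_create_props(struct drm_device *dev, struct foo_plane_props *p)
{
	/* signed range: values may legitimately be negative */
	p->zpos = drm_property_create_signed_range(dev, 0, "FOO_ZPOS",
						   -16, 16);
	if (!p->zpos)
		return -ENOMEM;

	/* object property: value must be a framebuffer id, or 0 for none */
	p->fb_id = drm_property_create_object(dev, 0, "FOO_FB",
					      DRM_MODE_OBJECT_FB);
	if (!p->fb_id)
		return -ENOMEM;

	return 0;
}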
/**
* drm_property_add_enum - add a possible value to an enumeration property
@ -3274,14 +3356,16 @@ int drm_property_add_enum(struct drm_property *property, int index,
{
struct drm_property_enum *prop_enum;
if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
return -EINVAL;
/*
* Bitmask enum properties have the additional constraint of values
* from 0 to 63
*/
if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
(value > 63))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
@ -3438,7 +3522,6 @@ EXPORT_SYMBOL(drm_object_property_get_value);
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
@ -3457,17 +3540,17 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
return -EINVAL;
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
property = drm_property_find(dev, out_resp->prop_id);
if (!property) {
ret = -ENOENT;
goto done;
}
property = obj_to_property(obj);
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
list_for_each_entry(prop_blob, &property->enum_blob_list, head)
blob_count++;
}
@ -3489,7 +3572,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
out_resp->count_values = value_count;
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
@ -3511,7 +3595,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = enum_count;
}
if (property->flags & DRM_MODE_PROP_BLOB) {
if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
@ -3590,7 +3674,6 @@ static void drm_property_destroy_blob(struct drm_device *dev,
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
@ -3600,12 +3683,11 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
return -EINVAL;
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
blob = drm_property_blob_find(dev, out_resp->blob_id);
if (!blob) {
ret = -ENOENT;
goto done;
}
blob = obj_to_blob(obj);
if (out_resp->length == blob->length) {
blob_ptr = (void __user *)(unsigned long)out_resp->data;
@ -3667,19 +3749,40 @@ static bool drm_property_change_is_valid(struct drm_property *property,
{
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
if (property->flags & DRM_MODE_PROP_RANGE) {
if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
if (value < property->values[0] || value > property->values[1])
return false;
return true;
} else if (property->flags & DRM_MODE_PROP_BITMASK) {
} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
int64_t svalue = U642I64(value);
if (svalue < U642I64(property->values[0]) ||
svalue > U642I64(property->values[1]))
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
int i;
uint64_t valid_mask = 0;
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
} else if (property->flags & DRM_MODE_PROP_BLOB) {
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
/* Only the driver knows */
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
struct drm_mode_object *obj;
/* a zero value for an object property translates to null: */
if (value == 0)
return true;
/*
* NOTE: use _object_find() directly to bypass restriction on
* looking up refcnt'd objects (ie. fb's). For a refcnt'd
* object this could race against object finalization, so it
* simply tells us that the object *was* valid. Which is good
* enough.
*/
obj = _object_find(property->dev, value, property->values[0]);
return obj != NULL;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@ -3987,7 +4090,6 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
@ -3997,12 +4099,11 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
return -EINVAL;
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
if (crtc->funcs->gamma_set == NULL) {
ret = -ENOSYS;
@ -4061,7 +4162,6 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
@ -4071,12 +4171,11 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
return -EINVAL;
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
@ -4129,7 +4228,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_page_flip *page_flip = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
@ -4143,12 +4241,11 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
return -EINVAL;
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
crtc = drm_crtc_find(dev, page_flip->crtc_id);
if (!crtc)
return -ENOENT;
crtc = obj_to_crtc(obj);
mutex_lock(&crtc->mutex);
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->primary->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@ -4232,7 +4329,7 @@ out:
drm_framebuffer_unreference(fb);
if (old_fb)
drm_framebuffer_unreference(old_fb);
mutex_unlock(&crtc->mutex);
drm_modeset_unlock(&crtc->mutex);
return ret;
}
@ -4597,6 +4694,7 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
mutex_init(&dev->mode_config.fb_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
@ -4696,5 +4794,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
}
idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);

View File

@ -93,8 +93,10 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress)
if (!oops_in_progress) {
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
@ -153,20 +155,14 @@ drm_encoder_disable(struct drm_encoder *encoder)
static void __drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_crtc *crtc;
drm_warn_on_modeset_not_all_locked(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (!drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnector encoder from any connector */
/* disconnect encoder from any connector */
encoder->crtc = NULL;
}
}
@ -349,7 +345,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
continue;
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.id, drm_get_encoder_name(encoder),
encoder->base.id, encoder->name,
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@ -400,8 +396,7 @@ done:
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
static int
static void
drm_crtc_helper_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@ -430,7 +425,6 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
}
__drm_helper_disable_unused_functions(dev);
return 0;
}
/**
@ -481,7 +475,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
return drm_crtc_helper_disable(set->crtc);
drm_crtc_helper_disable(set->crtc);
return 0;
}
dev = set->crtc->dev;
@ -620,11 +615,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
if (new_crtc) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.id, drm_get_connector_name(connector),
connector->base.id, connector->name,
new_crtc->base.id);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
}
}
@ -650,7 +645,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
set->connectors[i]->name);
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}

View File

@ -206,13 +206,17 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
* i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
* @adapter: i2c adapter to register
*
* This registers an i2c adapater that uses dp aux channel as it's underlaying
* This registers an i2c adapter that uses the DP AUX channel as its underlying
* transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
* and store it in the algo_data member of the @adapter argument. This will be
* used by the i2c over dp aux algorithm to drive the hardware.
*
* RETURNS:
* 0 on success, -ERRNO on failure.
*
* IMPORTANT:
* This interface is deprecated, please switch to the new dp aux helpers and
* drm_dp_aux_register().
*/
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
@ -378,7 +382,10 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
* transactions.
*/
for (retry = 0; retry < 7; retry++) {
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
@ -592,7 +599,9 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
* before giving up the AUX transaction.
*/
for (retry = 0; retry < 7; retry++) {
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, msg);
mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
@ -725,13 +734,15 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
};
/**
* drm_dp_aux_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
* drm_dp_aux_register() - initialise and register aux channel
* @aux: DisplayPort AUX channel
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux)
int drm_dp_aux_register(struct drm_dp_aux *aux)
{
mutex_init(&aux->hw_mutex);
aux->ddc.algo = &drm_dp_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
@ -746,14 +757,14 @@ int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux)
return i2c_add_adapter(&aux->ddc);
}
EXPORT_SYMBOL(drm_dp_aux_register_i2c_bus);
EXPORT_SYMBOL(drm_dp_aux_register);
/**
* drm_dp_aux_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
* drm_dp_aux_unregister() - unregister an AUX adapter
* @aux: DisplayPort AUX channel
*/
void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux)
void drm_dp_aux_unregister(struct drm_dp_aux *aux)
{
i2c_del_adapter(&aux->ddc);
}
EXPORT_SYMBOL(drm_dp_aux_unregister_i2c_bus);
EXPORT_SYMBOL(drm_dp_aux_unregister);
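To illustrate the rename, a sketch of a connector driver registering its AUX channel with the new names (the foo_* identifiers are invented; only drm_dp_aux_register()/drm_dp_aux_unregister() and the aux fields come from this diff). The core now serializes calls to aux->transfer with aux->hw_mutex, so the driver needs no extra locking of its own:
struct foo_connector {
	struct drm_dp_aux aux;
};

static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
				struct drm_dp_aux_msg *msg)
{
	/* ... drive the hardware AUX channel here ... */
	return msg->size;
}

static int foo_dp_aux_init(struct foo_connector *foo, struct device *dev)
{
	foo->aux.name = "foo DP AUX";
	foo->aux.dev = dev;
	foo->aux.transfer = foo_aux_transfer;

	return drm_dp_aux_register(&foo->aux);	/* was _register_i2c_bus() */
}

static void foo_dp_aux_fini(struct foo_connector *foo)
{
	drm_dp_aux_unregister(&foo->aux);	/* was _unregister_i2c_bus() */
}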

View File

@ -70,6 +70,8 @@
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
/* Force 8bpc */
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
struct detailed_mode_closure {
struct drm_connector *connector;
@ -125,6 +127,9 @@ static struct edid_quirk {
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
{ "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
@ -984,9 +989,13 @@ static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
/*
* Sanity check the header of the base EDID block. Return 8 if the header
* is perfect, down to 0 if it's totally wrong.
/**
* drm_edid_header_is_valid - sanity check the header of the base EDID block
* @raw_edid: pointer to raw base EDID block
*
* Sanity check the header of the base EDID block.
*
* Return: 8 if the header is perfect, down to 0 if it's totally wrong.
*/
int drm_edid_header_is_valid(const u8 *raw_edid)
{
@ -1005,9 +1014,16 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @raw_edid: pointer to raw EDID block
* @block: type of block to validate (0 for base, extension otherwise)
* @print_bad_edid: if true, dump bad EDID blocks to the console
*
* Validate a base or extension EDID block and optionally dump bad blocks to
* the console.
*
* Return: True if the block is valid, false otherwise.
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
@ -1077,6 +1093,8 @@ EXPORT_SYMBOL(drm_edid_block_valid);
* @edid: EDID data
*
* Sanity-check an entire EDID record (including extensions)
*
* Return: True if the EDID data is valid, false otherwise.
*/
bool drm_edid_is_valid(struct edid *edid)
{
@ -1096,18 +1114,15 @@ EXPORT_SYMBOL(drm_edid_is_valid);
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
*
* @adapter : i2c device adaptor
* drm_do_probe_ddc_edid() - get EDID information via I2C
* @adapter: I2C device adaptor
* @buf: EDID data buffer to be filled
* @block: 128 byte EDID block to start fetching from
* @len: EDID data buffer length to fetch
*
* Returns:
* Try to fetch EDID information by calling I2C driver functions.
*
* 0 on success or -1 on failure.
*
* Try to fetch EDID information by calling i2c driver function.
* Return: 0 on success or -1 on failure.
*/
static int
drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
@ -1118,7 +1133,8 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
unsigned char xfers = segment ? 3 : 2;
int ret, retries = 5;
/* The core i2c driver will automatically retry the transfer if the
/*
* The core I2C driver will automatically retry the transfer if the
* adapter reports EAGAIN. However, we find that bit-banging transfers
* are susceptible to errors under a heavily loaded machine and
* generate spurious NAKs and timeouts. Retrying the transfer
@ -1144,10 +1160,10 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
}
};
/*
* Avoid sending the segment addr to not upset non-compliant ddc
* monitors.
*/
/*
* Avoid sending the segment addr to not upset non-compliant
* DDC monitors.
*/
ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
if (ret == -ENXIO) {
@ -1216,7 +1232,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
connector->name, j);
connector->bad_edid_counter++;
}
@ -1236,7 +1252,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
carp:
if (print_bad_edid) {
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
connector->name, j);
}
connector->bad_edid_counter++;
@ -1246,12 +1262,10 @@ out:
}
/**
* Probe DDC presence.
* @adapter: i2c adapter to probe
* drm_probe_ddc() - probe DDC presence
* @adapter: I2C adapter to probe
*
* Returns:
*
* 1 on success
* Return: True on success, false on failure.
*/
bool
drm_probe_ddc(struct i2c_adapter *adapter)
@ -1265,12 +1279,12 @@ EXPORT_SYMBOL(drm_probe_ddc);
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
* @adapter: i2c adapter to use for DDC
* @adapter: I2C adapter to use for DDC
*
* Poke the given i2c channel to grab EDID data if possible. If found,
* Poke the given I2C channel to grab EDID data if possible. If found,
* attach it to the connector.
*
* Return edid data or NULL if we couldn't find any.
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
@ -1288,7 +1302,7 @@ EXPORT_SYMBOL(drm_get_edid);
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
*
* Return duplicate edid or NULL on allocation failure.
* Return: Pointer to duplicated EDID or NULL on allocation failure.
*/
struct edid *drm_edid_duplicate(const struct edid *edid)
{
@ -1411,7 +1425,8 @@ mode_is_rb(const struct drm_display_mode *mode)
* @rb: Mode reduced-blanking-ness
*
* Walk the DMT mode list looking for a match for the given parameters.
* Return a newly allocated copy of the mode, or NULL if not found.
*
* Return: A newly allocated copy of the mode, or NULL if not found.
*/
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
@ -1595,14 +1610,13 @@ bad_std_timing(u8 a, u8 b)
* @connector: connector of for the EDID block
* @edid: EDID block to scan
* @t: standard timing params
* @revision: standard timing level
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
*/
static struct drm_display_mode *
drm_mode_std(struct drm_connector *connector, struct edid *edid,
struct std_timing *t, int revision)
struct std_timing *t)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *m, *mode = NULL;
@ -1623,7 +1637,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
vrefresh_rate = vfreq + 60;
/* the vdisplay is calculated based on the aspect ratio */
if (aspect_ratio == 0) {
if (revision < 3)
if (edid->revision < 3)
vsize = hsize;
else
vsize = (hsize * 10) / 16;
@ -2140,7 +2154,7 @@ do_established_modes(struct detailed_timing *timing, void *c)
/**
* add_established_modes - get est. modes from EDID and add them
* @connector: connector of for the EDID block
* @connector: connector to add mode(s) to
* @edid: EDID block to scan
*
* Each EDID block contains a bitmap of the supported "established modes" list
@ -2191,8 +2205,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
struct drm_display_mode *newmode;
std = &data->data.timings[i];
newmode = drm_mode_std(connector, edid, std,
edid->revision);
newmode = drm_mode_std(connector, edid, std);
if (newmode) {
drm_mode_probed_add(connector, newmode);
closure->modes++;
@ -2203,7 +2216,7 @@ do_standard_modes(struct detailed_timing *timing, void *c)
/**
* add_standard_modes - get std. modes from EDID and add them
* @connector: connector of for the EDID block
* @connector: connector to add mode(s) to
* @edid: EDID block to scan
*
* Standard modes can be calculated using the appropriate standard (DMT,
@ -2221,8 +2234,7 @@ add_standard_modes(struct drm_connector *connector, struct edid *edid)
struct drm_display_mode *newmode;
newmode = drm_mode_std(connector, edid,
&edid->standard_timings[i],
edid->revision);
&edid->standard_timings[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@ -2425,7 +2437,7 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
*
* Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
* Return: The CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
* mode.
*/
u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
@ -2452,6 +2464,22 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
}
EXPORT_SYMBOL(drm_match_cea_mode);
/**
* drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
* the input VIC from the CEA mode list
* @video_code: ID given to each of the CEA modes
*
* Returns picture aspect ratio
*/
enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
/* return picture aspect ratio for video_code - 1 to access the
* right array element
*/
return edid_cea_modes[video_code-1].picture_aspect_ratio;
}
EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
* specific block).
@ -3023,11 +3051,9 @@ monitor_name(struct detailed_timing *t, void *data)
* @connector: connector corresponding to the HDMI/DP sink
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
* Some ELD fields are left to the graphics driver caller:
* - Conn_Type
* - HDCP
* - Port_ID
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
* Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
* fill in.
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
@ -3111,9 +3137,10 @@ EXPORT_SYMBOL(drm_edid_to_eld);
* @sads: pointer that will be set to the extracted SADs
*
* Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
* Note: returned pointer needs to be kfreed
*
* Return number of found SADs or negative number on error.
* Note: The returned pointer needs to be freed using kfree().
*
* Return: The number of found SADs or negative number on error.
*/
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
{
@ -3170,9 +3197,11 @@ EXPORT_SYMBOL(drm_edid_to_sad);
* @sadb: pointer to the speaker block
*
* Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
* Note: returned pointer needs to be kfreed
*
* Return number of found Speaker Allocation Blocks or negative number on error.
* Note: The returned pointer needs to be freed using kfree().
*
* Return: The number of found Speaker Allocation Blocks or negative number on
* error.
*/
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
{
@ -3204,10 +3233,9 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
/* Speaker Allocation Data Block */
if (dbl == 3) {
*sadb = kmalloc(dbl, GFP_KERNEL);
*sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
if (!*sadb)
return -ENOMEM;
memcpy(*sadb, &db[1], dbl);
count = dbl;
break;
}
@ -3219,9 +3247,12 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
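A short usage sketch for the two audio helpers documented above; both hand back a kmalloc'd buffer that the caller must kfree() (function name invented):
static void foo_log_sink_audio(struct edid *edid)
{
	struct cea_sad *sads;
	u8 *sadb;
	int i, count;

	count = drm_edid_to_sad(edid, &sads);
	for (i = 0; i < count; i++)
		DRM_DEBUG("SAD %d: format %u, channels %u\n",
			  i, sads[i].format, sads[i].channels);
	if (count > 0)
		kfree(sads);

	count = drm_edid_to_speaker_allocation(edid, &sadb);
	if (count > 0) {
		DRM_DEBUG("speaker allocation: 0x%02x\n", sadb[0]);
		kfree(sadb);
	}
}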
/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
* drm_av_sync_delay - compute the HDMI/DP sink audio-video sync delay
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
*
* Return: The HDMI/DP sink's audio-video sync delay in milliseconds or 0 if
* the sink doesn't support audio or video.
*/
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode)
@ -3263,6 +3294,9 @@ EXPORT_SYMBOL(drm_av_sync_delay);
*
* It's possible for one encoder to be associated with multiple HDMI/DP sinks.
* The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
*
* Return: The connector associated with the first HDMI/DP sink that has ELD
* attached to it.
*/
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode)
@ -3270,6 +3304,8 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
return connector;
@ -3279,11 +3315,12 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
EXPORT_SYMBOL(drm_select_eld);
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI, false if not or unknown.
*
* Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
{
@ -3321,6 +3358,7 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
* audio format, assume at least 'basic audio' support, even if 'basic
* audio' is not defined in EDID.
*
* Return: True if the monitor supports audio, false otherwise.
*/
bool drm_detect_monitor_audio(struct edid *edid)
{
@ -3364,6 +3402,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
* Check whether the monitor reports the RGB quantization range selection
* as supported. The AVI infoframe can then be used to inform the monitor
* which quantization range (full or limited) is used.
*
* Return: True if the RGB quantization range is selectable, false otherwise.
*/
bool drm_rgb_quant_range_selectable(struct edid *edid)
{
@ -3389,17 +3429,120 @@ bool drm_rgb_quant_range_selectable(struct edid *edid)
}
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
/**
* drm_assign_hdmi_deep_color_info - detect whether monitor supports
* hdmi deep color modes and update drm_display_info if so.
*
* @edid: monitor EDID information
* @info: Updated with maximum supported deep color bpc and color format
* if deep color supported.
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI deep color supported, false if not or unknown.
*/
static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
struct drm_display_info *info,
struct drm_connector *connector)
{
u8 *edid_ext, *hdmi;
int i;
int start_offset, end_offset;
unsigned int dc_bpc = 0;
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
return false;
if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
return false;
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
for_each_cea_db(edid_ext, i, start_offset, end_offset) {
if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
/* HDMI supports at least 8 bpc */
info->bpc = 8;
hdmi = &edid_ext[i];
if (cea_db_payload_len(hdmi) < 6)
return false;
if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
dc_bpc = 10;
info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
connector->name);
}
if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
dc_bpc = 12;
info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
connector->name);
}
if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
dc_bpc = 16;
info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
connector->name);
}
if (dc_bpc > 0) {
DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
connector->name, dc_bpc);
info->bpc = dc_bpc;
/*
* Deep color support mandates RGB444 support for all video
* modes and forbids YCRCB422 support for all video modes per
* HDMI 1.3 spec.
*/
info->color_formats = DRM_COLOR_FORMAT_RGB444;
/* YCRCB444 is optional according to spec. */
if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
connector->name);
}
/*
* Spec says that if any deep color mode is supported at all,
* then deep color 36 bit must be supported.
*/
if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
connector->name);
}
return true;
}
else {
DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
connector->name);
}
}
}
return false;
}
/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
* @connector: connector whose edid is used to build display info
*
* Grab any available display info and stuff it into the drm_display_info
* structure that's part of the connector. Useful for tracking bpp and
* color spaces.
*/
static void drm_add_display_info(struct edid *edid,
struct drm_display_info *info)
struct drm_display_info *info,
struct drm_connector *connector)
{
u8 *edid_ext;
@ -3429,6 +3572,9 @@ static void drm_add_display_info(struct edid *edid,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
/* HDMI deep color modes supported? Assign to info, if so */
drm_assign_hdmi_deep_color_info(edid, info, connector);
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
@ -3458,6 +3604,9 @@ static void drm_add_display_info(struct edid *edid,
break;
}
DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
connector->name, info->bpc);
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
@ -3468,11 +3617,11 @@ static void drm_add_display_info(struct edid *edid,
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: edid data
* @edid: EDID data
*
* Add the specified modes to the connector's mode list.
*
* Return number of modes added or 0 if we couldn't find any.
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
@ -3484,7 +3633,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
}
if (!drm_edid_is_valid(edid)) {
dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
connector->name);
return 0;
}
@ -3516,11 +3665,14 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
drm_add_display_info(edid, &connector->display_info);
drm_add_display_info(edid, &connector->display_info, connector);
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12;
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
@ -3534,7 +3686,7 @@ EXPORT_SYMBOL(drm_add_edid_modes);
* Add the specified modes to the connector's mode list. Only when the
* hdisplay/vdisplay is not beyond the given limit, it will be added.
*
* Return number of modes added or 0 if we couldn't find any.
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
@ -3573,13 +3725,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_add_modes_noedid);
/**
* drm_set_preferred_mode - Sets the preferred mode of a connector
* @connector: connector whose mode list should be processed
* @hpref: horizontal resolution of preferred mode
* @vpref: vertical resolution of preferred mode
*
* Marks a mode as preferred if it matches the resolution specified by @hpref
* and @vpref.
*/
void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
if (mode->hdisplay == hpref &&
if (mode->hdisplay == hpref &&
mode->vdisplay == vpref)
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
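Typical use of the new helper from a panel or bridge driver's ->get_modes() hook, here paired with drm_add_modes_noedid() (driver name and resolution are illustrative):
static int foo_panel_get_modes(struct drm_connector *connector)
{
	int count;

	count = drm_add_modes_noedid(connector, 1920, 1080);
	drm_set_preferred_mode(connector, 1920, 1080);

	return count;
}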
@ -3592,7 +3753,7 @@ EXPORT_SYMBOL(drm_set_preferred_mode);
* @frame: HDMI AVI infoframe
* @mode: DRM display mode
*
* Returns 0 on success or a negative error code on failure.
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
@ -3613,6 +3774,12 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->video_code = drm_match_cea_mode(mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/* Populate picture aspect ratio from CEA mode list */
if (frame->video_code > 0)
frame->picture_aspect = drm_get_cea_aspect_ratio(
frame->video_code);
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
@ -3657,7 +3824,7 @@ s3d_structure_from_display_mode(const struct drm_display_mode *mode)
* 4k or stereoscopic 3D mode. So when giving any other mode as input this
* function will return -EINVAL, error that can be safely ignored.
*
* Returns 0 on success or a negative error code on failure.
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,

View File

@ -31,8 +31,9 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
#define GENERIC_EDIDS 5
#define GENERIC_EDIDS 6
static const char *generic_edid_name[GENERIC_EDIDS] = {
"edid/800x600.bin",
"edid/1024x768.bin",
"edid/1280x1024.bin",
"edid/1600x1200.bin",
@ -41,6 +42,24 @@ static const char *generic_edid_name[GENERIC_EDIDS] = {
};
static const u8 generic_edid[GENERIC_EDIDS][128] = {
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f,
0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80,
0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@ -242,7 +261,7 @@ out:
int drm_load_edid_firmware(struct drm_connector *connector)
{
const char *connector_name = drm_get_connector_name(connector);
const char *connector_name = connector->name;
char *edidname = edid_firmware, *last, *colon;
int ret;
struct edid *edid;

View File

@ -429,13 +429,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
*/
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
if (fbdev_cma) {
struct drm_device *dev = fbdev_cma->fb_helper.dev;
drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
drm_modeset_unlock_all(dev);
}
if (fbdev_cma)
drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);

View File

@ -45,13 +45,13 @@ static LIST_HEAD(kernel_fb_helper_list);
* DOC: fbdev helpers
*
* The fb helper functions are useful to provide an fbdev on top of a drm kernel
* mode setting driver. They can be used mostly independantely from the crtc
* mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
* Initialization is done as a three-step process with drm_fb_helper_init(),
* drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
* Drivers with fancier requirements than the default beheviour can override the
* Drivers with fancier requirements than the default behaviour can override the
* second step with their own code. Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
@ -59,7 +59,7 @@ static LIST_HEAD(kernel_fb_helper_list);
* should also notify the fb helper code from updates to the output
* configuration by calling drm_fb_helper_hotplug_event(). For easier
* integration with the output polling code in drm_crtc_helper.c the modeset
* code proves a ->output_poll_changed callback.
* code provides a ->output_poll_changed callback.
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
@ -120,7 +120,7 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
mode = &fb_helper_conn->cmdline_mode;
/* do something on return - turn off connector maybe */
if (fb_get_options(drm_get_connector_name(connector), &option))
if (fb_get_options(connector->name, &option))
continue;
if (drm_mode_parse_command_line_for_connector(option,
@ -142,12 +142,12 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
}
DRM_INFO("forcing %s connector %s\n",
drm_get_connector_name(connector), s);
connector->name, s);
connector->force = mode->force;
}
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
drm_get_connector_name(connector),
connector->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
@ -273,15 +273,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
/**
* drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
* @fb_helper: fbcon to restore
*
* This should be called from driver's drm ->lastclose callback
* when implementing an fbcon on top of kms using this helper. This ensures that
* the user isn't greeted with a black screen when e.g. X dies.
*/
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
@ -311,7 +303,40 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
return error;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
/**
* drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
* @fb_helper: fbcon to restore
*
* This should be called from driver's drm ->lastclose callback
* when implementing an fbcon on top of kms using this helper. This ensures that
* the user isn't greeted with a black screen when e.g. X dies.
*
* Use this variant if you need to bypass locking (panic), or already
* hold all modeset locks. Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked()
*/
static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
return restore_fbdev_mode(fb_helper);
}
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: fbcon to restore
*
* This should be called from driver's drm ->lastclose callback
* when implementing an fbcon on top of kms using this helper. This ensures that
* the user isn't greeted with a black screen when e.g. X dies.
*/
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
bool ret;
drm_modeset_lock_all(dev);
ret = restore_fbdev_mode(fb_helper);
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
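The intended call site is the driver's ->lastclose hook; since the unlocked variant takes all modeset locks itself, the call collapses to a one-liner (the foo_* structure is invented for the sketch):
struct foo_drm_private {
	struct drm_fb_helper *fbdev;
};

static void foo_lastclose(struct drm_device *dev)
{
	struct foo_drm_private *priv = dev->dev_private;

	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}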
/*
* restore fbcon display for all kms driver's using this helper, used for sysrq
@ -326,12 +351,25 @@ static bool drm_fb_helper_force_kernel_mode(void)
return false;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
struct drm_device *dev = helper->dev;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
/* NOTE: we use lockless flag below to avoid grabbing other
* modeset locks. So just trylock the underlying mutex
* directly:
*/
if (!mutex_trylock(&dev->mode_config.mutex)) {
error = true;
continue;
}
ret = drm_fb_helper_restore_fbdev_mode(helper);
if (ret)
error = true;
mutex_unlock(&dev->mode_config.mutex);
}
return error;
}
@ -811,7 +849,6 @@ EXPORT_SYMBOL(drm_fb_helper_check_var);
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
if (var->pixclock != 0) {
@ -819,9 +856,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(fb_helper);
drm_modeset_unlock_all(dev);
drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;

View File

@ -43,8 +43,7 @@
DEFINE_MUTEX(drm_global_mutex);
EXPORT_SYMBOL(drm_global_mutex);
static int drm_open_helper(struct inode *inode, struct file *filp,
struct drm_minor *minor);
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
static int drm_setup(struct drm_device * dev)
{
@ -95,7 +94,7 @@ int drm_open(struct inode *inode, struct file *filp)
/* share address_space across all char-devs of a single device */
filp->f_mapping = dev->anon_inode->i_mapping;
retcode = drm_open_helper(inode, filp, minor);
retcode = drm_open_helper(filp, minor);
if (retcode)
goto err_undo;
if (need_setup) {
@ -171,7 +170,6 @@ static int drm_cpu_valid(void)
/**
* Called whenever a process opens /dev/drm.
*
* \param inode device inode.
* \param filp file pointer.
* \param minor acquired minor-object.
* \return zero on success or a negative number on failure.
@ -179,8 +177,7 @@ static int drm_cpu_valid(void)
* Creates and initializes a drm_file structure for the file private data in \p
* filp and add it into the double linked list in \p dev.
*/
static int drm_open_helper(struct inode *inode, struct file *filp,
struct drm_minor *minor)
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct drm_file *priv;

View File

@ -474,21 +474,10 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
goto fail;
pages[i] = p;
/* There is a hypothetical issue w/ drivers that require
* buffer memory in the low 4GB.. if the pages are un-
* pinned, and swapped out, they can end up swapped back
* in above 4GB. If pages are already in memory, then
* shmem_read_mapping_page_gfp will ignore the gfpmask,
* even if the already in-memory page disobeys the mask.
*
* It is only a theoretical issue today, because none of
* the devices with this limitation can be populated with
* enough memory to trigger the issue. But this BUG_ON()
* is here as a reminder in case the problem with
* shmem_read_mapping_page_gfp() isn't solved by the time
* it does become a real issue.
*
* See this thread: http://lkml.org/lkml/2011/7/11/238
/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
* correct region during swapin. Note that this requires
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required.
*/
BUG_ON((gfpmask & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));

View File

@ -47,18 +47,16 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
const char *bus_name;
if (!master)
return 0;
bus_name = dev->driver->bus->get_name(dev);
if (master->unique) {
seq_printf(m, "%s %s %s\n",
bus_name,
dev->driver->name,
dev_name(dev->dev), master->unique);
} else {
seq_printf(m, "%s %s\n",
bus_name, dev_name(dev->dev));
dev->driver->name, dev_name(dev->dev));
}
return 0;
}

View File

@ -72,9 +72,6 @@ static void
drm_unset_busid(struct drm_device *dev,
struct drm_master *master)
{
kfree(dev->devname);
dev->devname = NULL;
kfree(master->unique);
master->unique = NULL;
master->unique_len = 0;
@ -93,7 +90,8 @@ drm_unset_busid(struct drm_device *dev,
* Copies the bus id from userspace into drm_device::unique, and verifies that
* it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
* in interface version 1.1 and will return EBUSY when setversion has requested
* version 1.1 or greater.
* version 1.1 or greater. Also note that KMS is all version 1.1 and later and
* UMS was only ever supported on pci devices.
*/
int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@ -108,10 +106,13 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
if (!dev->driver->bus->set_unique)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
if (WARN_ON(!dev->pdev))
return -EINVAL;
ret = dev->driver->bus->set_unique(dev, master, u);
ret = drm_pci_set_unique(dev, master, u);
if (ret)
goto err;
@ -130,13 +131,25 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
if (master->unique != NULL)
drm_unset_busid(dev, master);
ret = dev->driver->bus->set_busid(dev, master);
if (ret)
goto err;
if (dev->driver->bus && dev->driver->bus->set_busid) {
ret = dev->driver->bus->set_busid(dev, master);
if (ret) {
drm_unset_busid(dev, master);
return ret;
}
} else {
if (WARN(dev->unique == NULL,
"No drm_bus.set_busid() implementation provided by "
"%ps. Use drm_dev_set_unique() to set the unique "
"name explicitly.", dev->driver))
return -EINVAL;
master->unique = kstrdup(dev->unique, GFP_KERNEL);
if (master->unique)
master->unique_len = strlen(dev->unique);
}
return 0;
err:
drm_unset_busid(dev, master);
return ret;
}
/**

View File

@ -1,6 +1,5 @@
/**
* \file drm_irq.c
* IRQ support
/*
* drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
@ -56,33 +55,6 @@
*/
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
/**
* Get interrupt from bus id.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_irq_busid structure.
* \return zero on success or a negative number on failure.
*
* Finds the PCI device with the specified bus id and gets its IRQ number.
* This IOCTL is deprecated, and will now return EINVAL for any busid not equal
* to that of the device that this DRM instance attached to.
*/
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_irq_busid *p = data;
if (!dev->driver->bus->irq_by_busid)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
return dev->driver->bus->irq_by_busid(dev, p);
}
/*
* Clear vblank timestamp buffer for a crtc.
*/
@ -167,33 +139,40 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
static void vblank_disable_fn(unsigned long arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_vblank_crtc *vblank = (void *)arg;
struct drm_device *dev = vblank->dev;
unsigned long irqflags;
int i;
int crtc = vblank->crtc;
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&dev->vblank[i].refcount) == 0 &&
dev->vblank[i].enabled) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
vblank_disable_and_save(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
vblank_disable_and_save(dev, crtc);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
/**
* drm_vblank_cleanup - cleanup vblank support
* @dev: DRM device
*
* This function cleans up any resources allocated in drm_vblank_init.
*/
void drm_vblank_cleanup(struct drm_device *dev)
{
int crtc;
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
del_timer_sync(&dev->vblank_disable_timer);
vblank_disable_fn((unsigned long)dev);
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
del_timer_sync(&dev->vblank[crtc].disable_timer);
vblank_disable_fn((unsigned long)&dev->vblank[crtc]);
}
kfree(dev->vblank);
@ -201,12 +180,20 @@ void drm_vblank_cleanup(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_vblank_cleanup);
/**
* drm_vblank_init - initialize vblank support
* @dev: drm_device
* @num_crtcs: number of crtcs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = -ENOMEM;
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
@ -216,8 +203,13 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
if (!dev->vblank)
goto err;
for (i = 0; i < num_crtcs; i++)
for (i = 0; i < num_crtcs; i++) {
dev->vblank[i].dev = dev;
dev->vblank[i].crtc = i;
init_waitqueue_head(&dev->vblank[i].queue);
setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
(unsigned long)&dev->vblank[i]);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@ -261,42 +253,42 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
}
/**
* Install IRQ handler.
*
* \param dev DRM device.
* drm_irq_install - install IRQ handler
* @dev: DRM device
* @irq: IRQ number to install the handler for
*
* Initializes the IRQ related data. Installs the handler, calling the driver
* \c irq_preinstall() and \c irq_postinstall() functions
* before and after the installation.
* irq_preinstall() and irq_postinstall() functions before and after the
* installation.
*
* This is the simplified helper interface provided for drivers with no special
* needs. Drivers which need to install interrupt handlers for multiple
* interrupts must instead set drm_device->irq_enabled to signal the DRM core
* that vblank interrupts are available.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_irq_install(struct drm_device *dev)
int drm_irq_install(struct drm_device *dev, int irq)
{
int ret;
unsigned long sh_flags = 0;
char *irqname;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
if (drm_dev_to_irq(dev) == 0)
if (irq == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
/* Driver must have been initialized */
if (!dev->dev_private) {
mutex_unlock(&dev->struct_mutex);
if (!dev->dev_private)
return -EINVAL;
}
if (dev->irq_enabled) {
mutex_unlock(&dev->struct_mutex);
if (dev->irq_enabled)
return -EBUSY;
}
dev->irq_enabled = true;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
DRM_DEBUG("irq=%d\n", irq);
/* Before installing handler */
if (dev->driver->irq_preinstall)
@ -306,18 +298,11 @@ int drm_irq_install(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
if (dev->devname)
irqname = dev->devname;
else
irqname = dev->driver->name;
ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
sh_flags, irqname, dev);
ret = request_irq(irq, dev->driver->irq_handler,
sh_flags, dev->driver->name, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
return ret;
}
@ -329,12 +314,12 @@ int drm_irq_install(struct drm_device *dev)
ret = dev->driver->irq_postinstall(dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
free_irq(drm_dev_to_irq(dev), dev);
free_irq(irq, dev);
} else {
dev->irq = irq;
}
return ret;
@ -342,11 +327,20 @@ int drm_irq_install(struct drm_device *dev)
EXPORT_SYMBOL(drm_irq_install);
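As an aside (not part of the patch): with the new signature a driver passes the IRQ number explicitly instead of relying on drm_dev_to_irq(). A hypothetical PCI driver's ->load() hook might look like this sketch:
/* Hypothetical ->load(): set up vblank bookkeeping for two CRTCs and
 * install the interrupt handler with an explicit IRQ number. */
static int my_driver_load(struct drm_device *dev, unsigned long flags)
{
        int ret;

        ret = drm_vblank_init(dev, 2);
        if (ret)
                return ret;

        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret)
                drm_vblank_cleanup(dev);

        return ret;
}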
/**
* Uninstall the IRQ handler.
* drm_irq_uninstall - uninstall the IRQ handler
* @dev: DRM device
*
* \param dev DRM device.
* Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
* This should only be called by drivers which used drm_irq_install() to set up
* their interrupt handler. Other drivers must only reset
* drm_device->irq_enabled to false.
*
* Calls the driver's \c irq_uninstall() function, and stops the irq.
* Note that for kernel modesetting drivers it is a bug if this function fails.
* The sanity checks are only to catch buggy user modesetting drivers which call
* the same function through an ioctl.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_irq_uninstall(struct drm_device *dev)
{
@ -357,10 +351,8 @@ int drm_irq_uninstall(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
mutex_lock(&dev->struct_mutex);
irq_enabled = dev->irq_enabled;
dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
/*
* Wake up any waiters so they don't hang.
@ -379,7 +371,7 @@ int drm_irq_uninstall(struct drm_device *dev)
if (!irq_enabled)
return -EINVAL;
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
DRM_DEBUG("irq=%d\n", dev->irq);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
@ -387,13 +379,13 @@ int drm_irq_uninstall(struct drm_device *dev)
if (dev->driver->irq_uninstall)
dev->driver->irq_uninstall(dev);
free_irq(drm_dev_to_irq(dev), dev);
free_irq(dev->irq, dev);
return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
/**
/*
* IRQ control ioctl.
*
* \param inode device inode.
@ -408,43 +400,52 @@ int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_control *ctl = data;
int ret = 0, irq;
/* if we haven't irq we fallback for compatibility reasons -
* this used to be a separate function in drm_dma.h
*/
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
/* UMS was only ever supported on PCI devices. */
if (WARN_ON(!dev->pdev))
return -EINVAL;
switch (ctl->func) {
case DRM_INST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
irq = dev->pdev->irq;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != drm_dev_to_irq(dev))
ctl->irq != irq)
return -EINVAL;
return drm_irq_install(dev);
mutex_lock(&dev->struct_mutex);
ret = drm_irq_install(dev, irq);
mutex_unlock(&dev->struct_mutex);
return ret;
case DRM_UNINST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
return drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
ret = drm_irq_uninstall(dev);
mutex_unlock(&dev->struct_mutex);
return ret;
default:
return -EINVAL;
}
}
/**
* drm_calc_timestamping_constants - Calculate vblank timestamp constants
*
* @crtc drm_crtc whose timestamp constants should be updated.
* @mode display mode containing the scanout timings
* drm_calc_timestamping_constants - calculate vblank timestamp constants
* @crtc: drm_crtc whose timestamp constants should be updated.
* @mode: display mode containing the scanout timings
*
* Calculate and store various constants which are later
* needed by vblank and swap-completion timestamping, e.g,
* by drm_calc_vbltimestamp_from_scanoutpos(). They are
* derived from crtc's true scanout timing, so they take
* derived from CRTC's true scanout timing, so they take
* things like panel scaling or other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
@ -489,11 +490,22 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
* drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
* drivers. Implements calculation of exact vblank timestamps from
* given drm_display_mode timings and current video scanout position
* of a crtc. This can be called from within get_vblank_timestamp()
* implementation of a kms driver to implement the actual timestamping.
* drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
* @dev: DRM device
* @crtc: Which CRTC's vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to struct timeval which should receive the timestamp
* @flags: Flags to pass to driver:
* 0 = Default,
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
* @refcrtc: CRTC which defines scanout timing
* @mode: mode which defines the scanout timings
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
* timings and current video scanout position of a CRTC. This can be called from
* within get_vblank_timestamp() implementation of a kms driver to implement the
* actual timestamping.
*
* Should return timestamps conforming to the OML_sync_control OpenML
* extension specification. The timestamp corresponds to the end of
@ -508,21 +520,11 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* returns as no operation if a doublescan or interlaced video mode is
* active. Higher level code is expected to handle this.
*
* @dev: DRM device.
* @crtc: Which crtc's vblank timestamp to retrieve.
* @max_error: Desired maximum allowable error in timestamps (nanosecs).
* On return contains true maximum error of timestamp.
* @vblank_time: Pointer to struct timeval which should receive the timestamp.
* @flags: Flags to pass to driver:
* 0 = Default.
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
* @refcrtc: drm_crtc* of crtc which defines scanout timing.
* @mode: mode which defines the scanout timings
*
* Returns negative value on error, failure or if not supported in current
* Returns:
* Negative value on error, failure or if not supported in current
* video mode:
*
* -EINVAL - Invalid crtc.
* -EINVAL - Invalid CRTC.
* -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
* -ENOTSUPP - Function not supported in current display mode.
* -EIO - Failed, e.g., due to failed scanout position query.
@ -671,23 +673,23 @@ static struct timeval get_drm_timestamp(void)
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval.
*
* vblank interval
* @dev: DRM device
* @crtc: which crtc's vblank timestamp to retrieve
* @crtc: which CRTC's vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
* @flags: Flags to pass to driver:
* 0 = Default.
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
* 0 = Default,
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
*
* Fetches the system timestamp corresponding to the time of the most recent
* vblank interval on specified crtc. May call into kms-driver to
* vblank interval on specified CRTC. May call into kms-driver to
* compute the timestamp with a high-precision GPU specific method.
*
* Returns zero if timestamp originates from uncorrected do_gettimeofday()
* call, i.e., it isn't very precisely locked to the true vblank.
*
* Returns non-zero if timestamp is considered to be very precise.
* Returns:
* Non-zero if timestamp is considered to be very precise, zero otherwise.
*/
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
@ -722,6 +724,9 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
* Returns:
* The software vblank counter.
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
@ -740,8 +745,7 @@ EXPORT_SYMBOL(drm_vblank_count);
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current value vblank counter
* value.
* of the vblank interval that corresponds to the current vblank counter value.
*/
u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime)
@ -869,6 +873,42 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
smp_mb__after_atomic();
}
/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
*/
static int drm_vblank_enable(struct drm_device *dev, int crtc)
{
int ret = 0;
assert_spin_locked(&dev->vbl_lock);
spin_lock(&dev->vblank_time_lock);
if (!dev->vblank[crtc].enabled) {
/*
* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
* until we are done reinitializing master counter and
* timestamps. Filter code in drm_handle_vblank() will
* prevent double-accounting of the same vblank interval.
*/
ret = dev->driver->enable_vblank(dev, crtc);
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
if (ret)
atomic_dec(&dev->vblank[crtc].refcount);
else {
dev->vblank[crtc].enabled = true;
drm_update_vblank_count(dev, crtc);
}
}
spin_unlock(&dev->vblank_time_lock);
return ret;
}
/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
@ -877,36 +917,20 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
* RETURNS
* This is the legacy version of drm_crtc_vblank_get().
*
* Returns:
* Zero on success, nonzero on failure.
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
unsigned long irqflags, irqflags2;
unsigned long irqflags;
int ret = 0;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
if (!dev->vblank[crtc].enabled) {
/* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
* until we are done reinitializing master counter and
* timestamps. Filtercode in drm_handle_vblank() will
* prevent double-accounting of same vblank interval.
*/
ret = dev->driver->enable_vblank(dev, crtc);
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
crtc, ret);
if (ret)
atomic_dec(&dev->vblank[crtc].refcount);
else {
dev->vblank[crtc].enabled = true;
drm_update_vblank_count(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
ret = drm_vblank_enable(dev, crtc);
} else {
if (!dev->vblank[crtc].enabled) {
atomic_dec(&dev->vblank[crtc].refcount);
@ -919,6 +943,24 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_get);
/**
* drm_crtc_vblank_get - get a reference count on vblank events
* @crtc: which CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
* This is the native kms version of drm_vblank_get().
*
* Returns:
* Zero on success, nonzero on failure.
*/
int drm_crtc_vblank_get(struct drm_crtc *crtc)
{
return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_get);
/**
* drm_vblank_put - give up ownership of vblank events
* @dev: DRM device
@ -926,6 +968,8 @@ EXPORT_SYMBOL(drm_vblank_get);
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*
* This is the legacy version of drm_crtc_vblank_put().
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
@ -934,17 +978,39 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
mod_timer(&dev->vblank_disable_timer,
mod_timer(&dev->vblank[crtc].disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
/**
* drm_crtc_vblank_put - give up ownership of vblank events
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*
* This is the native kms version of drm_vblank_put().
*/
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_put);
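A typical use of this get/put pair (illustrative only, hypothetical my_* driver code) is to hold a reference while a page-flip completion event is outstanding:
/* Hypothetical flip path: keep vblank interrupts alive until the flip has
 * been signalled back to userspace. */
static int my_queue_flip(struct drm_crtc *crtc)
{
        int ret;

        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                return ret;

        /* ... arm the hardware flip, note that an event is pending ... */
        return 0;
}

static void my_complete_flip(struct drm_crtc *crtc)
{
        /* ... deliver the completion event ... */
        drm_crtc_vblank_put(crtc);
}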
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
*
* Caller must hold event lock.
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
* stored so that drm_vblank_on() can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending.
*
* This is the legacy version of drm_crtc_vblank_off().
*/
void drm_vblank_off(struct drm_device *dev, int crtc)
{
@ -977,6 +1043,66 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_off);
/**
* drm_crtc_vblank_off - disable vblank events on a CRTC
* @crtc: CRTC in question
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
* stored so that drm_vblank_on can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending.
*
* This is the native kms version of drm_vblank_off().
*/
void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_off);
/**
* drm_vblank_on - enable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
*
* This function restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
* drm_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the legacy version of drm_crtc_vblank_on().
*/
void drm_vblank_on(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* re-enable interrupts if there are users left */
if (atomic_read(&dev->vblank[crtc].refcount) != 0)
WARN_ON(drm_vblank_enable(dev, crtc));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_on);
/**
* drm_crtc_vblank_on - enable vblank events on a CRTC
* @crtc: CRTC in question
*
* This function restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
* drm_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the native kms version of drm_vblank_on().
*/
void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_on);
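To illustrate (hypothetical driver code, not from this series): the off/on pair is meant to bracket whatever path stops and restarts scanout, so the software frame counter stays consistent across the gap:
/* Hypothetical CRTC hooks bracketing scanout shutdown/startup. */
static void my_crtc_disable(struct drm_crtc *crtc)
{
        drm_crtc_vblank_off(crtc);
        /* ... stop scanout, gate clocks, maybe power down ... */
}

static void my_crtc_enable(struct drm_crtc *crtc)
{
        /* ... power up, restore timings, start scanout ... */
        drm_crtc_vblank_on(crtc);
}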
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
@ -984,6 +1110,21 @@ EXPORT_SYMBOL(drm_vblank_off);
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
*
* This is done by grabbing a temporary vblank reference to ensure that the
* vblank interrupt keeps running across the modeset sequence. With this the
* software-side vblank frame counting will ensure that there are no jumps or
* discontinuities.
*
* Unfortunately this approach is racy and also doesn't work when the vblank
* interrupt stops running, e.g. across system suspend/resume. It is therefore
* highly recommended that drivers use the newer drm_vblank_off() and
* drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
* using "cooked" software vblank frame counters and not relying on any hardware
* counters.
*
* Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
* again.
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
@ -1005,6 +1146,14 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
/**
* drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
* @dev: DRM device
* @crtc: CRTC in question
*
* This function again drops the temporary vblank reference acquired in
* drm_vblank_pre_modeset.
*/
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
@ -1026,7 +1175,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
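For completeness, the legacy pre/post pair is used like this (a sketch with a hypothetical helper; as noted above, new drivers should prefer drm_vblank_off()/drm_vblank_on()):
/* Hypothetical legacy modeset path; pipe is the CRTC index whose hardware
 * frame counter is about to be reset. */
static void my_legacy_modeset(struct drm_device *dev, int pipe)
{
        drm_vblank_pre_modeset(dev, pipe);
        /* ... reprogram the pipe timings ... */
        drm_vblank_post_modeset(dev, pipe);
}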
/**
/*
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
@ -1139,7 +1288,7 @@ err_put:
return ret;
}
/**
/*
* Wait for VBLANK.
*
* \param inode device inode.
@ -1150,7 +1299,7 @@ err_put:
*
* This function enables the vblank interrupt on the pipe requested, then
* sleeps waiting for the requested sequence number to occur, and drops
* the vblank interrupt refcount afterwards. (vblank irq disable follows that
* the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
* after a timeout with no further vblank waits scheduled).
*/
int drm_wait_vblank(struct drm_device *dev, void *data,
@ -1160,9 +1309,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret;
unsigned int flags, seq, crtc, high_crtc;
if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
return -EINVAL;
if (!dev->irq_enabled)
return -EINVAL;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
@ -1222,6 +1370,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->vblank[crtc].enabled ||
!dev->irq_enabled));
if (ret != -EINTR) {

View File

@ -282,6 +282,14 @@ static int mipi_dsi_drv_remove(struct device *dev)
return drv->remove(dsi);
}
static void mipi_dsi_drv_shutdown(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
drv->shutdown(dsi);
}
/**
* mipi_dsi_driver_register - register a driver for DSI devices
* @drv: DSI driver structure
@ -293,6 +301,8 @@ int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
drv->driver.probe = mipi_dsi_drv_probe;
if (drv->remove)
drv->driver.remove = mipi_dsi_drv_remove;
if (drv->shutdown)
drv->driver.shutdown = mipi_dsi_drv_shutdown;
return driver_register(&drv->driver);
}
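As an illustration (hypothetical panel driver, not part of this change), the new hook is simply another member of mipi_dsi_driver:
static int my_panel_probe(struct mipi_dsi_device *dsi)
{
        /* ... attach and configure the panel ... */
        return 0;
}

static int my_panel_remove(struct mipi_dsi_device *dsi)
{
        /* ... detach the panel ... */
        return 0;
}

static void my_panel_shutdown(struct mipi_dsi_device *dsi)
{
        /* Blank the panel and cut power before the system goes down. */
}

static struct mipi_dsi_driver my_panel_driver = {
        .probe = my_panel_probe,
        .remove = my_panel_remove,
        .shutdown = my_panel_shutdown,
        .driver = {
                .name = "my-panel",
        },
};
/* registered as usual with mipi_dsi_driver_register(&my_panel_driver) */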

View File

@ -1013,6 +1013,7 @@ EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
* @merge_type_bits: whether to merge or overwrite the mode type bits.
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
@ -1021,7 +1022,8 @@ EXPORT_SYMBOL(drm_mode_sort);
* This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves.
*/
void drm_mode_connector_list_update(struct drm_connector *connector)
void drm_mode_connector_list_update(struct drm_connector *connector,
bool merge_type_bits)
{
struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt;
@ -1039,7 +1041,10 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
mode->type |= pmode->type;
if (merge_type_bits)
mode->type |= pmode->type;
else
mode->type = pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;

View File

@ -0,0 +1,247 @@
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>
/**
* DOC: kms locking
*
* As KMS moves toward more fine grained locking, and atomic ioctl where
* userspace can indirectly control locking order, it becomes necessary
* to use ww_mutex and acquire-contexts to avoid deadlocks. But because
* the locking is more distributed around the driver code, we want a bit
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
* For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
*
* The basic usage pattern is to:
*
* drm_modeset_acquire_init(&ctx)
* retry:
* foreach (lock in random_ordered_set_of_locks) {
* ret = drm_modeset_lock(lock, &ctx)
* if (ret == -EDEADLK) {
* drm_modeset_backoff(&ctx);
* goto retry;
* }
* }
*
* ... do stuff ...
*
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*/
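Spelled out as compilable code, the pattern above looks roughly like the following sketch for a single hypothetical lock (with several locks the same -EDEADLK/backoff retry wraps each drm_modeset_lock() call):
static int my_do_locked(struct drm_modeset_lock *lock)
{
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock(lock, &ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        /* ... modify the state protected by the lock ... */

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        return ret;
}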
/**
* drm_modeset_acquire_init - initialize acquire context
* @ctx: the acquire context
* @flags: for future use
*/
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags)
{
ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
INIT_LIST_HEAD(&ctx->locked);
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
/**
* drm_modeset_acquire_fini - cleanup acquire context
* @ctx: the acquire context
*/
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);
/**
* drm_modeset_drop_locks - drop all locks
* @ctx: the acquire context
*
* Drop all locks currently held against this acquire context.
*/
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
WARN_ON(ctx->contended);
while (!list_empty(&ctx->locked)) {
struct drm_modeset_lock *lock;
lock = list_first_entry(&ctx->locked,
struct drm_modeset_lock, head);
drm_modeset_unlock(lock);
}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);
static inline int modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx,
bool interruptible, bool slow)
{
int ret;
WARN_ON(ctx->contended);
if (interruptible && slow) {
ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (interruptible) {
ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (slow) {
ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
ret = 0;
} else {
ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
}
if (!ret) {
WARN_ON(!list_empty(&lock->head));
list_add(&lock->head, &ctx->locked);
} else if (ret == -EALREADY) {
/* we already hold the lock.. this is fine. For atomic
* we will need to be able to drm_modeset_lock() things
* without having to keep track of what is already locked
* or not.
*/
ret = 0;
} else if (ret == -EDEADLK) {
ctx->contended = lock;
}
return ret;
}
static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
bool interruptible)
{
struct drm_modeset_lock *contended = ctx->contended;
ctx->contended = NULL;
if (WARN_ON(!contended))
return 0;
drm_modeset_drop_locks(ctx);
return modeset_lock(contended, ctx, interruptible, true);
}
/**
* drm_modeset_backoff - deadlock avoidance backoff
* @ctx: the acquire context
*
* If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
* you must call this function to drop all currently held locks and
* block until the contended lock becomes available.
*/
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
modeset_backoff(ctx, false);
}
EXPORT_SYMBOL(drm_modeset_backoff);
/**
* drm_modeset_backoff_interruptible - deadlock avoidance backoff
* @ctx: the acquire context
*
* Interruptible version of drm_modeset_backoff()
*/
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
{
return modeset_backoff(ctx, true);
}
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
/**
* drm_modeset_lock - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
*
* If ctx is not NULL, then its ww acquire context is used and the
* lock will be tracked by the context and can be released by calling
* drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
* deadlock scenario has been detected and it is an error to attempt
* to take any more locks without first calling drm_modeset_backoff().
*/
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
return modeset_lock(lock, ctx, false, false);
ww_mutex_lock(&lock->mutex, NULL);
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);
/**
* drm_modeset_lock_interruptible - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
*
* Interruptible version of drm_modeset_lock()
*/
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
return modeset_lock(lock, ctx, true, false);
return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
/**
* drm_modeset_unlock - drop modeset lock
* @lock: lock to release
*/
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
list_del_init(&lock->head);
ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);
/* Temporary: until we have sufficiently fine-grained locking, there
* are a couple of scenarios where it is convenient to grab all crtc locks.
* It is planned to remove this:
*/
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_crtc *crtc;
int ret = 0;
list_for_each_entry(crtc, &config->crtc_list, head) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);

View File

@ -1,17 +1,3 @@
/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
* \file drm_pci.c
* \brief Functions and ioctls to manage PCI memory
*
* \warning These interfaces aren't stable yet.
*
* \todo Implement the remaining ioctl's for the PCI pools.
* \todo The wrappers here are so thin that they would be better off inlined..
*
* \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
@ -42,12 +28,14 @@
#include <linux/export.h>
#include <drm/drmP.h>
/**********************************************************************/
/** \name PCI memory */
/*@{*/
/**
* \brief Allocate a PCI consistent memory block, for DMA.
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
* @dev: DRM device
* @size: size of block to allocate
* @align: alignment of block
*
* Return: A handle to the allocated memory block on success or NULL on
* failure.
*/
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
@ -88,8 +76,8 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
EXPORT_SYMBOL(drm_pci_alloc);
/**
* \brief Free a PCI consistent memory block without freeing its descriptor.
/*
* Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
@ -111,7 +99,9 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
}
/**
* \brief Free a PCI consistent memory block
* drm_pci_free - Free a PCI consistent memory block
* @dev: DRM device
* @dmah: handle to memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
@ -137,21 +127,9 @@ static int drm_get_pci_domain(struct drm_device *dev)
return pci_domain_nr(dev->pdev->bus);
}
static int drm_pci_get_irq(struct drm_device *dev)
{
return dev->pdev->irq;
}
static const char *drm_pci_get_name(struct drm_device *dev)
{
struct pci_driver *pdriver = dev->driver->kdriver.pci;
return pdriver->name;
}
static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
struct pci_driver *pdriver = dev->driver->kdriver.pci;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
@ -173,29 +151,16 @@ static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
} else
master->unique_len = len;
dev->devname =
kmalloc(strlen(pdriver->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", pdriver->name,
master->unique);
return 0;
err:
return ret;
}
static int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
{
int domain, bus, slot, func, ret;
const char *bus_name;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
@ -212,17 +177,6 @@ static int drm_pci_set_unique(struct drm_device *dev,
master->unique[master->unique_len] = '\0';
bus_name = dev->driver->bus->get_name(dev);
dev->devname = kmalloc(strlen(bus_name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", bus_name,
master->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
@ -247,7 +201,6 @@ err:
return ret;
}
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
@ -262,6 +215,36 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
return 0;
}
/**
* drm_irq_by_busid - Get interrupt from bus ID
* @dev: DRM device
* @data: IOCTL parameter pointing to a drm_irq_busid structure
* @file_priv: DRM file private.
*
* Finds the PCI device with the specified bus id and gets its IRQ number.
* This IOCTL is deprecated, and will now return EINVAL for any busid not equal
* to that of the device this DRM instance is attached to.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_irq_busid *p = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
/* UMS was only ever supported on PCI devices. */
if (WARN_ON(!dev->pdev))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
return drm_pci_irq_by_busid(dev, p);
}
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
@ -287,24 +270,20 @@ void drm_pci_agp_destroy(struct drm_device *dev)
}
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
};
/**
* Register.
*
* \param pdev - PCI device structure
* \param ent entry from the PCI ID table with device type flags
* \return zero on success or a negative number on failure.
* drm_get_pci_dev - Register a PCI device with the DRM subsystem
* @pdev: PCI device
* @ent: entry from the PCI ID table that matches @pdev
* @driver: DRM device driver
*
* Attempts to get inter-module "drm" information. If we are first,
* then register the character device and inter-module information.
* Try to register; if we fail to register, back out previous work.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
@ -357,15 +336,14 @@ err_free:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
* PCI device initialization. Called direct from modules at load time.
* drm_pci_init - Register matching PCI devices with the DRM subsystem
* @driver: DRM device driver
* @pdriver: PCI device driver
*
* \return zero on success or a negative number on failure.
* Initializes a drm_device structure, registering the stubs and initializing
* the AGP device.
*
* Initializes a drm_device structures,registering the
* stubs and initializing the AGP device.
*
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
* Return: 0 on success or a negative error code on failure.
*/
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
@ -375,7 +353,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
driver->kdriver.pci = pdriver;
driver->bus = &drm_pci_bus;
if (driver->driver_features & DRIVER_MODESET)
@ -453,11 +430,31 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
void drm_pci_agp_destroy(struct drm_device *dev) {}
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -EINVAL;
}
int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
{
return -EINVAL;
}
#endif
EXPORT_SYMBOL(drm_pci_init);
/*@}*/
/**
* drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
* @driver: DRM device driver
* @pdriver: PCI device driver
*
* Unregisters one or more devices matched by a PCI driver from the DRM
* subsystem.
*/
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;

View File

@ -25,7 +25,9 @@
#include <linux/list.h>
#include <drm/drmP.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_plane_helper.h>
#define SUBPIXEL_MASK 0xffff
@ -36,9 +38,9 @@
* creating the primary plane. However drivers that still call
* drm_plane_init() will use this minimal format list as the default.
*/
const static uint32_t safe_modeset_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
static const uint32_t safe_modeset_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
/*
@ -54,6 +56,13 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
struct drm_connector *connector;
int count = 0;
/*
* Note: Once we change the plane hooks to more fine-grained locking we
* need to grab the connection_mutex here to be able to make these
* checks.
*/
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
@ -65,6 +74,79 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
return count;
}
/**
* drm_plane_helper_check_update() - Check plane update for validity
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
* @fb: framebuffer to flip onto plane
* @src: source coordinates in 16.16 fixed point
* @dest: integer destination coordinates
* @clip: integer clipping coordinates
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
* @can_position: is it legal to position the plane such that it
* doesn't cover the entire crtc? This will generally
* only be false for primary planes.
* @can_update_disabled: can the plane be updated while the crtc
* is disabled?
* @visible: output parameter indicating whether plane is still visible after
* clipping
*
* Checks that a desired plane update is valid. Drivers that provide
* their own plane handling rather than helper-provided implementations may
* still wish to call this function to avoid duplication of error checking
* code.
*
* Returns:
* Zero if update appears valid, error code on failure.
*/
int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_rect *src,
struct drm_rect *dest,
const struct drm_rect *clip,
int min_scale,
int max_scale,
bool can_position,
bool can_update_disabled,
bool *visible)
{
int hscale, vscale;
if (!crtc->enabled && !can_update_disabled) {
DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
}
/* Check scaling */
hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
DRM_DEBUG_KMS("Invalid scaling of plane\n");
return -ERANGE;
}
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
* Plane isn't visible; some drivers can handle this
* so we just return success here. Drivers that can't
* (including those that use the primary plane helper's
* update function) will return an error from their
* update_plane handler.
*/
return 0;
if (!can_position && !drm_rect_equals(dest, clip)) {
DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(drm_plane_helper_check_update);
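To show how a driver with its own plane handling might reuse this check from ->update_plane() (hypothetical my_* code, mirroring what the primary helper below does):
static int my_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                           struct drm_framebuffer *fb,
                           int crtc_x, int crtc_y,
                           unsigned int crtc_w, unsigned int crtc_h,
                           uint32_t src_x, uint32_t src_y,
                           uint32_t src_w, uint32_t src_h)
{
        struct drm_rect src = {
                .x1 = src_x,
                .y1 = src_y,
                .x2 = src_x + src_w,
                .y2 = src_y + src_h,
        };
        struct drm_rect dest = {
                .x1 = crtc_x,
                .y1 = crtc_y,
                .x2 = crtc_x + crtc_w,
                .y2 = crtc_y + crtc_h,
        };
        const struct drm_rect clip = {
                .x2 = crtc->mode.hdisplay,
                .y2 = crtc->mode.vdisplay,
        };
        bool visible;
        int ret;

        ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            true, false, &visible);
        if (ret)
                return ret;
        if (!visible)
                return plane->funcs->disable_plane(plane);

        /* ... program the hardware with the clipped src/dest rectangles ... */
        return 0;
}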
/**
* drm_primary_helper_update() - Helper for primary plane update
* @plane: plane object to update
@ -113,56 +195,42 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
.x = src_x >> 16,
.y = src_y >> 16,
};
struct drm_rect src = {
.x1 = src_x,
.y1 = src_y,
.x2 = src_x + src_w,
.y2 = src_y + src_h,
};
struct drm_rect dest = {
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
struct drm_rect clip = {
const struct drm_rect clip = {
.x2 = crtc->mode.hdisplay,
.y2 = crtc->mode.vdisplay,
};
struct drm_connector **connector_list;
struct drm_framebuffer *tmpfb;
int num_connectors, ret;
bool visible;
if (!crtc->enabled) {
DRM_DEBUG_KMS("Cannot update primary plane of a disabled CRTC.\n");
return -EINVAL;
}
/* Disallow subpixel positioning */
if ((src_x | src_y | src_w | src_h) & SUBPIXEL_MASK) {
DRM_DEBUG_KMS("Primary plane does not support subpixel positioning\n");
return -EINVAL;
}
/* Primary planes are locked to their owning CRTC */
if (plane->possible_crtcs != drm_crtc_mask(crtc)) {
DRM_DEBUG_KMS("Cannot change primary plane CRTC\n");
return -EINVAL;
}
/* Disallow scaling */
if (crtc_w != src_w || crtc_h != src_h) {
DRM_DEBUG_KMS("Can't scale primary plane\n");
return -EINVAL;
}
/* Make sure primary plane covers entire CRTC */
drm_rect_intersect(&dest, &clip);
if (dest.x1 != 0 || dest.y1 != 0 ||
dest.x2 != crtc->mode.hdisplay || dest.y2 != crtc->mode.vdisplay) {
DRM_DEBUG_KMS("Primary plane must cover entire CRTC\n");
return -EINVAL;
}
/* Framebuffer must be big enough to cover entire plane */
ret = drm_crtc_check_viewport(crtc, crtc_x, crtc_y, &crtc->mode, fb);
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, false, &visible);
if (ret)
return ret;
if (!visible)
/*
* Primary plane isn't visible. Note that unless a driver
* provides its own disable function, this will just
* wind up returning -EINVAL to userspace.
*/
return plane->funcs->disable_plane(plane);
/* Find current connectors for CRTC */
num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
BUG_ON(num_connectors == 0);
@ -176,21 +244,14 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
set.num_connectors = num_connectors;
/*
* set_config() adjusts crtc->primary->fb; however the DRM setplane
* code that called us expects to handle the framebuffer update and
* reference counting; save and restore the current fb before
* calling it.
*
* N.B., we call set_config() directly here rather than using
* We call set_config() directly here rather than using
* drm_mode_set_config_internal. We're reprogramming the same
* connectors that were already in use, so we shouldn't need the extra
* cross-CRTC fb refcounting to accommodate stealing connectors.
* drm_mode_setplane() already handles the basic refcounting for the
* framebuffers involved in this operation.
*/
tmpfb = plane->fb;
ret = crtc->funcs->set_config(&set);
plane->fb = tmpfb;
kfree(connector_list);
return ret;
@ -232,7 +293,6 @@ EXPORT_SYMBOL(drm_primary_helper_disable);
*/
void drm_primary_helper_destroy(struct drm_plane *plane)
{
plane->funcs->disable_plane(plane);
drm_plane_cleanup(plane);
kfree(plane);
}

View File

@ -68,16 +68,6 @@ err_free:
return ret;
}
static int drm_platform_get_irq(struct drm_device *dev)
{
return platform_get_irq(dev->platformdev, 0);
}
static const char *drm_platform_get_name(struct drm_device *dev)
{
return dev->platformdev->name;
}
static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret, id;
@ -106,46 +96,30 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
goto err;
}
dev->devname =
kmalloc(strlen(dev->platformdev->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->platformdev->name,
master->unique);
return 0;
err:
return ret;
}
static struct drm_bus drm_platform_bus = {
.bus_type = DRIVER_BUS_PLATFORM,
.get_irq = drm_platform_get_irq,
.get_name = drm_platform_get_name,
.set_busid = drm_platform_set_busid,
};
/**
* Platform device initialization. Called direct from modules.
* drm_platform_init - Register a platform device with the DRM subsystem
* @driver: DRM device driver
* @platform_device: platform device to register
*
* \return zero on success or a negative number on failure.
* Registers the specified DRM device driver and platform device with the DRM
* subsystem, initializing a drm_device structure and calling the driver's
* .load() function.
*
* Initializes a drm_device structures,registering the
* stubs
*
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
* Return: 0 on success or a negative error code on failure.
*/
int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
{
DRM_DEBUG("\n");
driver->kdriver.platform_device = platform_device;
driver->bus = &drm_platform_bus;
return drm_get_platform_dev(platform_device, driver);
}

View File

@ -82,26 +82,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
return;
}
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be use as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
* Returns:
* The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
@ -114,7 +96,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
connector->name);
/* set all modes to the unverified state */
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
@ -138,7 +120,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
drm_mode_connector_update_edid_property(connector, NULL);
verbose_prune = false;
goto prune;
@ -155,7 +137,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (count == 0)
goto prune;
drm_mode_connector_list_update(connector);
drm_mode_connector_list_update(connector, merge_type_bits);
if (maxX && maxY)
drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
@ -169,7 +151,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK)
if (mode->status == MODE_OK && connector_funcs->mode_valid)
mode->status = connector_funcs->mode_valid(connector,
mode);
}
@ -186,7 +168,7 @@ prune:
drm_mode_sort(&connector->modes);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
drm_get_connector_name(connector));
connector->name);
list_for_each_entry(mode, &connector->modes, head) {
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
@ -194,8 +176,48 @@ prune:
return count;
}
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be used as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
* Returns:
* The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
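Typical use (hypothetical connector implementation) is simply to plug the helper in as the ->fill_modes() vfunc:
#include <linux/slab.h>
#include <drm/drm_crtc_helper.h>

static enum drm_connector_status
my_connector_detect(struct drm_connector *connector, bool force)
{
        /* ... poke the hardware ... */
        return connector_status_connected;
}

static void my_connector_destroy(struct drm_connector *connector)
{
        drm_connector_cleanup(connector);
        kfree(connector);
}

static const struct drm_connector_funcs my_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = my_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = my_connector_destroy,
};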
/**
* drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* This operates like drm_helper_probe_single_connector_modes() except it
* replaces the mode type bits instead of merging them for preferred modes.
*/
int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
/**
* drm_kms_helper_hotplug_event - fire off KMS hotplug events
* @dev: drm_device whose connector state changed
@ -264,7 +286,7 @@ static void output_poll_execute(struct work_struct *work)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
"status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
connector->name,
old, new);
changed = true;
@ -409,7 +431,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
if (old_status != connector->status)

View File

@ -1,16 +1,11 @@
/**
* \file drm_stub.h
* Stub support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*/
/*
* Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author Rickard E. (Rik) Faith <faith@valinux.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@ -128,7 +123,10 @@ struct drm_master *drm_master_create(struct drm_minor *minor)
kref_init(&master->refcount);
spin_lock_init(&master->lock.spinlock);
init_waitqueue_head(&master->lock.lock_queue);
drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
kfree(master);
return NULL;
}
INIT_LIST_HEAD(&master->magicfree);
master->minor = minor;
@ -166,9 +164,6 @@ static void drm_master_destroy(struct kref *kref)
master->unique_len = 0;
}
kfree(dev->devname);
dev->devname = NULL;
list_for_each_entry_safe(pt, next, &master->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&master->magiclist, &pt->hash_item);
@ -294,6 +289,7 @@ static void drm_minor_free(struct drm_device *dev, unsigned int type)
slot = drm_minor_get_slot(dev, type);
if (*slot) {
drm_mode_group_destroy(&(*slot)->mode_group);
kfree(*slot);
*slot = NULL;
}
@ -424,11 +420,15 @@ void drm_minor_release(struct drm_minor *minor)
}
/**
* Called via drm_exit() at module unload time or when pci device is
* unplugged.
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
*
* Called at module unload time or when a PCI device is unplugged.
*
* Use of this function is discouraged. It will eventually go away completely.
* Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
*
* Cleans up the DRM device, calling drm_lastclose().
*
*/
void drm_put_dev(struct drm_device *dev)
{
@ -535,7 +535,7 @@ static void drm_fs_inode_free(struct inode *inode)
}
/**
* drm_dev_alloc - Allocate new drm device
* drm_dev_alloc - Allocate new DRM device
* @driver: DRM driver to allocate device for
* @parent: Parent device object
*
@ -569,7 +569,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->buf_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
@ -648,9 +648,8 @@ static void drm_dev_release(struct kref *ref)
drm_minor_free(dev, DRM_MINOR_RENDER);
drm_minor_free(dev, DRM_MINOR_CONTROL);
kfree(dev->devname);
mutex_destroy(&dev->master_mutex);
kfree(dev->unique);
kfree(dev);
}
@ -690,6 +689,7 @@ EXPORT_SYMBOL(drm_dev_unref);
/**
* drm_dev_register - Register DRM device
* @dev: Device to register
* @flags: Flags passed to the driver's .load() function
*
* Register the DRM device @dev with the system, advertise device to user-space
* and start normal device operation. @dev must be allocated via drm_dev_alloc()
@ -778,3 +778,28 @@ void drm_dev_unregister(struct drm_device *dev)
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
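For context, a minimal sketch of the register/teardown flow that the drm_put_dev() kerneldoc above recommends instead (hypothetical driver code; foo_driver is a placeholder struct drm_driver and pdev a platform device):
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&foo_driver, &pdev->dev);
	if (!ddev)
		return -ENOMEM;

	ret = drm_dev_register(ddev, 0);
	if (ret) {
		drm_dev_unref(ddev);
		return ret;
	}

	/* ... and on removal ... */
	drm_dev_unregister(ddev);
	drm_dev_unref(ddev);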
/**
* drm_dev_set_unique - Set the unique name of a DRM device
* @dev: device of which to set the unique name
* @fmt: format string for unique name
*
* Sets the unique name of a DRM device using the specified format string and
* a variable list of arguments. Drivers can use this at driver probe time if
* the unique name of the devices they drive is static.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
va_list ap;
kfree(dev->unique);
va_start(ap, fmt);
dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);
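A usage sketch (hypothetical; assumes a driver whose device has a stable bus id, with ddev and an err_free label defined elsewhere in the probe path):
	ret = drm_dev_set_unique(ddev, "%s", dev_name(&pdev->dev));
	if (ret)
		goto err_free;	/* placeholder error label */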

View File

@ -380,9 +380,9 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
connector->kdev = device_create(drm_class, dev->primary->kdev,
0, connector, "card%d-%s",
dev->primary->index, drm_get_connector_name(connector));
dev->primary->index, connector->name);
DRM_DEBUG("adding \"%s\" to sysfs\n",
drm_get_connector_name(connector));
connector->name);
if (IS_ERR(connector->kdev)) {
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
@ -460,7 +460,7 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
if (!connector->kdev)
return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
drm_get_connector_name(connector));
connector->name);
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
device_remove_file(connector->kdev, &connector_attrs[i]);

View File

@ -36,16 +36,6 @@ err_free:
}
EXPORT_SYMBOL(drm_get_usb_dev);
static int drm_usb_get_irq(struct drm_device *dev)
{
return 0;
}
static const char *drm_usb_get_name(struct drm_device *dev)
{
return "USB";
}
static int drm_usb_set_busid(struct drm_device *dev,
struct drm_master *master)
{
@ -53,18 +43,24 @@ static int drm_usb_set_busid(struct drm_device *dev,
}
static struct drm_bus drm_usb_bus = {
.bus_type = DRIVER_BUS_USB,
.get_irq = drm_usb_get_irq,
.get_name = drm_usb_get_name,
.set_busid = drm_usb_set_busid,
};
/**
* drm_usb_init - Register matching USB devices with the DRM subsystem
* @driver: DRM device driver
* @udriver: USB device driver
*
* Registers one or more devices matched by a USB driver with the DRM
* subsystem.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
{
int res;
DRM_DEBUG("\n");
driver->kdriver.usb = udriver;
driver->bus = &drm_usb_bus;
res = usb_register(udriver);
@ -72,6 +68,14 @@ int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
}
EXPORT_SYMBOL(drm_usb_init);
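A sketch of how a USB-based DRM driver would typically pair drm_usb_init() with drm_usb_exit() in its module init/exit (all foo_* names are placeholders):
static int __init foo_init(void)
{
	return drm_usb_init(&foo_drm_driver, &foo_usb_driver);
}

static void __exit foo_exit(void)
{
	drm_usb_exit(&foo_drm_driver, &foo_usb_driver);
}

module_init(foo_init);
module_exit(foo_exit);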
/**
* drm_usb_exit - Unregister matching USB devices from the DRM subsystem
* @driver: DRM device driver
* @udriver: USB device driver
*
* Unregisters one or more devices matched by a USB driver from the DRM
* subsystem.
*/
void drm_usb_exit(struct drm_driver *driver,
struct usb_driver *udriver)
{

View File

@ -26,14 +26,14 @@ config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
depends on DRM_EXYNOS && !FB_S3C
select FB_MODE_HELPERS
help
Choose this option if you want to use Exynos FIMD for DRM.
config DRM_EXYNOS_DPI
bool "EXYNOS DRM parallel output support"
depends on DRM_EXYNOS
depends on DRM_EXYNOS_FIMD
select DRM_PANEL
default n
help
@ -41,7 +41,7 @@ config DRM_EXYNOS_DPI
config DRM_EXYNOS_DSI
bool "EXYNOS DRM MIPI-DSI driver support"
depends on DRM_EXYNOS
depends on DRM_EXYNOS_FIMD
select DRM_MIPI_DSI
select DRM_PANEL
default n
@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
depends on DRM_EXYNOS && ARCH_EXYNOS
depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
help
This enables support for DP device.

View File

@ -1,63 +0,0 @@
/*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Authors:
* Seung-Woo Kim <sw0312.kim@samsung.com>
* Inki Dae <inki.dae@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <drm/drmP.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
static int s5p_ddc_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
hdmi_attach_ddc_client(client);
dev_info(&client->adapter->dev,
"attached %s into i2c adapter successfully\n",
client->name);
return 0;
}
static int s5p_ddc_remove(struct i2c_client *client)
{
dev_info(&client->adapter->dev,
"detached %s from i2c adapter successfully\n",
client->name);
return 0;
}
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
}, {
.compatible = "samsung,exynos4210-hdmiddc",
}, {
/* end node */
}
};
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
.of_match_table = hdmiddc_match_types,
},
.probe = s5p_ddc_probe,
.remove = s5p_ddc_remove,
.command = NULL,
};

View File

@ -18,6 +18,9 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/component.h>
#include <linux/phy/phy.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@ -141,15 +144,15 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
return -EIO;
}
exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_TEST_REQUEST,
exynos_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
&test_vector);
if (test_vector & DPCD_TEST_EDID_READ) {
if (test_vector & DP_TEST_LINK_EDID_READ) {
exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TEST_EDID_CHECKSUM,
DP_TEST_EDID_CHECKSUM,
edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TEST_RESPONSE,
DPCD_TEST_EDID_CHECKSUM_WRITE);
DP_TEST_RESPONSE,
DP_TEST_EDID_CHECKSUM_WRITE);
}
} else {
dev_info(dp->dev, "EDID data does not include any extensions.\n");
@ -171,15 +174,15 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
}
exynos_dp_read_byte_from_dpcd(dp,
DPCD_ADDR_TEST_REQUEST,
DP_TEST_REQUEST,
&test_vector);
if (test_vector & DPCD_TEST_EDID_READ) {
if (test_vector & DP_TEST_LINK_EDID_READ) {
exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TEST_EDID_CHECKSUM,
DP_TEST_EDID_CHECKSUM,
edid[EDID_CHECKSUM]);
exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TEST_RESPONSE,
DPCD_TEST_EDID_CHECKSUM_WRITE);
DP_TEST_RESPONSE,
DP_TEST_EDID_CHECKSUM_WRITE);
}
}
@ -193,8 +196,8 @@ static int exynos_dp_handle_edid(struct exynos_dp_device *dp)
int i;
int retval;
/* Read DPCD DPCD_ADDR_DPCD_REV~RECEIVE_PORT1_CAP_1 */
retval = exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_DPCD_REV,
/* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
retval = exynos_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV,
12, buf);
if (retval)
return retval;
@ -214,14 +217,14 @@ static void exynos_dp_enable_rx_to_enhanced_mode(struct exynos_dp_device *dp,
{
u8 data;
exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET, &data);
exynos_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
if (enable)
exynos_dp_write_byte_to_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET,
DPCD_ENHANCED_FRAME_EN |
exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
DP_LANE_COUNT_ENHANCED_FRAME_EN |
DPCD_LANE_COUNT_SET(data));
else
exynos_dp_write_byte_to_dpcd(dp, DPCD_ADDR_LANE_COUNT_SET,
exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
DPCD_LANE_COUNT_SET(data));
}
@ -230,7 +233,7 @@ static int exynos_dp_is_enhanced_mode_available(struct exynos_dp_device *dp)
u8 data;
int retval;
exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LANE_COUNT, &data);
exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
retval = DPCD_ENHANCED_FRAME_CAP(data);
return retval;
@ -250,8 +253,8 @@ static void exynos_dp_training_pattern_dis(struct exynos_dp_device *dp)
exynos_dp_set_training_pattern(dp, DP_NONE);
exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TRAINING_PATTERN_SET,
DPCD_TRAINING_PATTERN_DISABLED);
DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
}
static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
@ -295,7 +298,7 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)
/* Setup RX configuration */
buf[0] = dp->link_train.link_rate;
buf[1] = dp->link_train.lane_count;
retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_LINK_BW_SET,
retval = exynos_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET,
2, buf);
if (retval)
return retval;
@ -322,16 +325,16 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)
/* Set RX training pattern */
retval = exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TRAINING_PATTERN_SET,
DPCD_SCRAMBLING_DISABLED | DPCD_TRAINING_PATTERN_1);
DP_TRAINING_PATTERN_SET,
DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
if (retval)
return retval;
for (lane = 0; lane < lane_count; lane++)
buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 |
DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0;
buf[lane] = DP_TRAIN_PRE_EMPHASIS_0 |
DP_TRAIN_VOLTAGE_SWING_400;
retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
lane_count, buf);
return retval;
@ -352,7 +355,7 @@ static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
for (lane = 0; lane < lane_count; lane++) {
lane_status = exynos_dp_get_lane_status(link_status, lane);
if ((lane_status & DPCD_LANE_CR_DONE) == 0)
if ((lane_status & DP_LANE_CR_DONE) == 0)
return -EINVAL;
}
return 0;
@ -364,13 +367,13 @@ static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
int lane;
u8 lane_status;
if ((link_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0)
return -EINVAL;
for (lane = 0; lane < lane_count; lane++) {
lane_status = exynos_dp_get_lane_status(link_status, lane);
lane_status &= DPCD_CHANNEL_EQ_BITS;
if (lane_status != DPCD_CHANNEL_EQ_BITS)
lane_status &= DP_CHANNEL_EQ_BITS;
if (lane_status != DP_CHANNEL_EQ_BITS)
return -EINVAL;
}
@ -468,9 +471,9 @@ static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp,
DPCD_PRE_EMPHASIS_SET(pre_emphasis);
if (voltage_swing == VOLTAGE_LEVEL_3)
training_lane |= DPCD_MAX_SWING_REACHED;
training_lane |= DP_TRAIN_MAX_SWING_REACHED;
if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
dp->link_train.training_lane[lane] = training_lane;
}
@ -487,12 +490,12 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
lane_count = dp->link_train.lane_count;
retval = exynos_dp_read_bytes_from_dpcd(dp,
DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
DP_LANE0_1_STATUS, 2, link_status);
if (retval)
return retval;
retval = exynos_dp_read_bytes_from_dpcd(dp,
DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
if (retval)
return retval;
@ -501,9 +504,9 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
retval = exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TRAINING_PATTERN_SET,
DPCD_SCRAMBLING_DISABLED |
DPCD_TRAINING_PATTERN_2);
DP_TRAINING_PATTERN_SET,
DP_LINK_SCRAMBLING_DISABLE |
DP_TRAINING_PATTERN_2);
if (retval)
return retval;
@ -543,7 +546,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
dp->link_train.training_lane[lane], lane);
retval = exynos_dp_write_bytes_to_dpcd(dp,
DPCD_ADDR_TRAINING_LANE0_SET, lane_count,
DP_TRAINING_LANE0_SET, lane_count,
dp->link_train.training_lane);
if (retval)
return retval;
@ -562,7 +565,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
lane_count = dp->link_train.lane_count;
retval = exynos_dp_read_bytes_from_dpcd(dp,
DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
DP_LANE0_1_STATUS, 2, link_status);
if (retval)
return retval;
@ -572,12 +575,12 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
}
retval = exynos_dp_read_bytes_from_dpcd(dp,
DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
if (retval)
return retval;
retval = exynos_dp_read_byte_from_dpcd(dp,
DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED, &link_align);
DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
if (retval)
return retval;
@ -619,7 +622,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
exynos_dp_set_lane_link_training(dp,
dp->link_train.training_lane[lane], lane);
retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
lane_count, dp->link_train.training_lane);
return retval;
@ -634,7 +637,7 @@ static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
* For DP rev.1.1, Maximum link rate of Main Link lanes
* 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps
*/
exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LINK_RATE, &data);
exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
*bandwidth = data;
}
@ -647,7 +650,7 @@ static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp,
* For DP rev.1.1, Maximum number of Main Link lanes
* 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
*/
exynos_dp_read_byte_from_dpcd(dp, DPCD_ADDR_MAX_LANE_COUNT, &data);
exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
*lane_count = DPCD_MAX_LANE_COUNT(data);
}
@ -819,20 +822,20 @@ static void exynos_dp_enable_scramble(struct exynos_dp_device *dp, bool enable)
exynos_dp_enable_scrambling(dp);
exynos_dp_read_byte_from_dpcd(dp,
DPCD_ADDR_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_SET,
&data);
exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TRAINING_PATTERN_SET,
(u8)(data & ~DPCD_SCRAMBLING_DISABLED));
DP_TRAINING_PATTERN_SET,
(u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
} else {
exynos_dp_disable_scrambling(dp);
exynos_dp_read_byte_from_dpcd(dp,
DPCD_ADDR_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_SET,
&data);
exynos_dp_write_byte_to_dpcd(dp,
DPCD_ADDR_TRAINING_PATTERN_SET,
(u8)(data | DPCD_SCRAMBLING_DISABLED));
DP_TRAINING_PATTERN_SET,
(u8)(data | DP_LINK_SCRAMBLING_DISABLE));
}
}
@ -949,12 +952,6 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
return 1;
}
static int exynos_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static struct drm_encoder *exynos_dp_best_encoder(
struct drm_connector *connector)
{
@ -965,20 +962,9 @@ static struct drm_encoder *exynos_dp_best_encoder(
static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
.get_modes = exynos_dp_get_modes,
.mode_valid = exynos_dp_mode_valid,
.best_encoder = exynos_dp_best_encoder,
};
static int exynos_dp_initialize(struct exynos_drm_display *display,
struct drm_device *drm_dev)
{
struct exynos_dp_device *dp = display->ctx;
dp->drm_dev = drm_dev;
return 0;
}
static bool find_bridge(const char *compat, struct bridge_init *bridge)
{
bridge->client = NULL;
@ -1101,12 +1087,11 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
break;
default:
break;
};
}
dp->dpms_mode = mode;
}
static struct exynos_drm_display_ops exynos_dp_display_ops = {
.initialize = exynos_dp_initialize,
.create_connector = exynos_dp_create_connector,
.dpms = exynos_dp_dpms,
};
@ -1123,10 +1108,8 @@ static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
dp_video_config = devm_kzalloc(dev,
sizeof(*dp_video_config), GFP_KERNEL);
if (!dp_video_config) {
dev_err(dev, "memory allocation for video config failed\n");
if (!dp_video_config)
return ERR_PTR(-ENOMEM);
}
dp_video_config->h_sync_polarity =
of_property_read_bool(dp_node, "hsync-active-high");
@ -1185,10 +1168,7 @@ static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
if (!dp_phy_node) {
dp->phy = devm_phy_get(dp->dev, "dp");
if (IS_ERR(dp->phy))
return PTR_ERR(dp->phy);
else
return 0;
return PTR_ERR_OR_ZERO(dp->phy);
}
if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
@ -1230,19 +1210,20 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
return 0;
}
static int exynos_dp_probe(struct platform_device *pdev)
static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
struct resource *res;
struct exynos_dp_device *dp;
unsigned int irq_flags;
int ret = 0;
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
if (!dp) {
dev_err(&pdev->dev, "no memory for device data\n");
if (!dp)
return -ENOMEM;
}
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@ -1273,7 +1254,30 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (IS_ERR(dp->reg_base))
return PTR_ERR(dp->reg_base);
dp->irq = platform_get_irq(pdev, 0);
dp->hpd_gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0);
if (gpio_is_valid(dp->hpd_gpio)) {
/*
* Set up the hotplug GPIO from the device tree as an interrupt.
* Simply specifying a different interrupt in the device tree
* doesn't work since we handle hotplug rather differently when
* using a GPIO. We also need the actual GPIO specifier so
* that we can get the current state of the GPIO.
*/
ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
"hpd_gpio");
if (ret) {
dev_err(&pdev->dev, "failed to get hpd gpio\n");
return ret;
}
dp->irq = gpio_to_irq(dp->hpd_gpio);
irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
} else {
dp->hpd_gpio = -ENODEV;
dp->irq = platform_get_irq(pdev, 0);
irq_flags = 0;
}
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
return -ENODEV;
@ -1285,28 +1289,61 @@ static int exynos_dp_probe(struct platform_device *pdev)
exynos_dp_init_dp(dp);
ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
"exynos-dp", dp);
ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
irq_flags, "exynos-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
}
disable_irq(dp->irq);
dp->drm_dev = drm_dev;
exynos_dp_display.ctx = dp;
platform_set_drvdata(pdev, &exynos_dp_display);
exynos_drm_display_register(&exynos_dp_display);
return 0;
return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display);
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
void *data)
{
struct exynos_drm_display *display = dev_get_drvdata(dev);
struct exynos_dp_device *dp = display->ctx;
struct drm_encoder *encoder = dp->encoder;
exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&dp->connector);
}
static const struct component_ops exynos_dp_ops = {
.bind = exynos_dp_bind,
.unbind = exynos_dp_unbind,
};
static int exynos_dp_probe(struct platform_device *pdev)
{
int ret;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
exynos_dp_display.type);
if (ret)
return ret;
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,
EXYNOS_DEVICE_TYPE_CONNECTOR);
return ret;
}
static int exynos_dp_remove(struct platform_device *pdev)
{
struct exynos_drm_display *display = platform_get_drvdata(pdev);
exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
exynos_drm_display_unregister(&exynos_dp_display);
component_del(&pdev->dev, &exynos_dp_ops);
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}

View File

@ -14,6 +14,7 @@
#define _EXYNOS_DP_CORE_H
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/exynos_drm.h>
#define DP_TIMEOUT_LOOP_COUNT 100
@ -159,6 +160,7 @@ struct exynos_dp_device {
struct work_struct hotplug_work;
struct phy *phy;
int dpms_mode;
int hpd_gpio;
struct exynos_drm_panel_info panel;
};
@ -261,69 +263,17 @@ void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
#define EDID_EXTENSION_FLAG 0x7e
#define EDID_CHECKSUM 0x7f
/* Definition for DPCD Register */
#define DPCD_ADDR_DPCD_REV 0x0000
#define DPCD_ADDR_MAX_LINK_RATE 0x0001
#define DPCD_ADDR_MAX_LANE_COUNT 0x0002
#define DPCD_ADDR_LINK_BW_SET 0x0100
#define DPCD_ADDR_LANE_COUNT_SET 0x0101
#define DPCD_ADDR_TRAINING_PATTERN_SET 0x0102
#define DPCD_ADDR_TRAINING_LANE0_SET 0x0103
#define DPCD_ADDR_LANE0_1_STATUS 0x0202
#define DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED 0x0204
#define DPCD_ADDR_ADJUST_REQUEST_LANE0_1 0x0206
#define DPCD_ADDR_ADJUST_REQUEST_LANE2_3 0x0207
#define DPCD_ADDR_TEST_REQUEST 0x0218
#define DPCD_ADDR_TEST_RESPONSE 0x0260
#define DPCD_ADDR_TEST_EDID_CHECKSUM 0x0261
#define DPCD_ADDR_SINK_POWER_STATE 0x0600
/* DPCD_ADDR_MAX_LANE_COUNT */
/* DP_MAX_LANE_COUNT */
#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1)
#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f)
/* DPCD_ADDR_LANE_COUNT_SET */
#define DPCD_ENHANCED_FRAME_EN (0x1 << 7)
/* DP_LANE_COUNT_SET */
#define DPCD_LANE_COUNT_SET(x) ((x) & 0x1f)
/* DPCD_ADDR_TRAINING_PATTERN_SET */
#define DPCD_SCRAMBLING_DISABLED (0x1 << 5)
#define DPCD_SCRAMBLING_ENABLED (0x0 << 5)
#define DPCD_TRAINING_PATTERN_2 (0x2 << 0)
#define DPCD_TRAINING_PATTERN_1 (0x1 << 0)
#define DPCD_TRAINING_PATTERN_DISABLED (0x0 << 0)
/* DPCD_ADDR_TRAINING_LANE0_SET */
#define DPCD_MAX_PRE_EMPHASIS_REACHED (0x1 << 5)
/* DP_TRAINING_LANE0_SET */
#define DPCD_PRE_EMPHASIS_SET(x) (((x) & 0x3) << 3)
#define DPCD_PRE_EMPHASIS_GET(x) (((x) >> 3) & 0x3)
#define DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 (0x0 << 3)
#define DPCD_MAX_SWING_REACHED (0x1 << 2)
#define DPCD_VOLTAGE_SWING_SET(x) (((x) & 0x3) << 0)
#define DPCD_VOLTAGE_SWING_GET(x) (((x) >> 0) & 0x3)
#define DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0 (0x0 << 0)
/* DPCD_ADDR_LANE0_1_STATUS */
#define DPCD_LANE_SYMBOL_LOCKED (0x1 << 2)
#define DPCD_LANE_CHANNEL_EQ_DONE (0x1 << 1)
#define DPCD_LANE_CR_DONE (0x1 << 0)
#define DPCD_CHANNEL_EQ_BITS (DPCD_LANE_CR_DONE| \
DPCD_LANE_CHANNEL_EQ_DONE|\
DPCD_LANE_SYMBOL_LOCKED)
/* DPCD_ADDR_LANE_ALIGN__STATUS_UPDATED */
#define DPCD_LINK_STATUS_UPDATED (0x1 << 7)
#define DPCD_DOWNSTREAM_PORT_STATUS_CHANGED (0x1 << 6)
#define DPCD_INTERLANE_ALIGN_DONE (0x1 << 0)
/* DPCD_ADDR_TEST_REQUEST */
#define DPCD_TEST_EDID_READ (0x1 << 2)
/* DPCD_ADDR_TEST_RESPONSE */
#define DPCD_TEST_EDID_CHECKSUM_WRITE (0x1 << 2)
/* DPCD_ADDR_SINK_POWER_STATE */
#define DPCD_SET_POWER_STATE_D0 (0x1 << 0)
#define DPCD_SET_POWER_STATE_D4 (0x2 << 0)
#endif /* _EXYNOS_DP_CORE_H */
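For reference, a sketch of the old-to-new DPCD name mapping applied throughout exynos_dp_core.c (informational comments only; offsets as defined in <drm/drm_dp_helper.h>):
	/* DPCD_ADDR_LINK_BW_SET          -> DP_LINK_BW_SET          (0x100) */
	/* DPCD_ADDR_LANE_COUNT_SET       -> DP_LANE_COUNT_SET       (0x101) */
	/* DPCD_ADDR_TRAINING_PATTERN_SET -> DP_TRAINING_PATTERN_SET (0x102) */
	/* DPCD_ADDR_TRAINING_LANE0_SET   -> DP_TRAINING_LANE0_SET   (0x103) */
	/* DPCD_ADDR_LANE0_1_STATUS       -> DP_LANE0_1_STATUS       (0x202) */
	/* DPCD_ADDR_TEST_REQUEST         -> DP_TEST_REQUEST         (0x218) */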

View File

@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include "exynos_dp_core.h"
#include "exynos_dp_reg.h"
@ -326,6 +327,9 @@ void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
{
u32 reg;
if (gpio_is_valid(dp->hpd_gpio))
return;
reg = HOTPLUG_CHG | HPD_LOST | PLUG;
writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
@ -337,6 +341,9 @@ void exynos_dp_init_hpd(struct exynos_dp_device *dp)
{
u32 reg;
if (gpio_is_valid(dp->hpd_gpio))
return;
exynos_dp_clear_hotplug_interrupts(dp);
reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
@ -348,19 +355,27 @@ enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
{
u32 reg;
/* Parse hotplug interrupt status register */
reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
if (gpio_is_valid(dp->hpd_gpio)) {
reg = gpio_get_value(dp->hpd_gpio);
if (reg)
return DP_IRQ_TYPE_HP_CABLE_IN;
else
return DP_IRQ_TYPE_HP_CABLE_OUT;
} else {
/* Parse hotplug interrupt status register */
reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
if (reg & PLUG)
return DP_IRQ_TYPE_HP_CABLE_IN;
if (reg & PLUG)
return DP_IRQ_TYPE_HP_CABLE_IN;
if (reg & HPD_LOST)
return DP_IRQ_TYPE_HP_CABLE_OUT;
if (reg & HPD_LOST)
return DP_IRQ_TYPE_HP_CABLE_OUT;
if (reg & HOTPLUG_CHG)
return DP_IRQ_TYPE_HP_CHANGE;
if (reg & HOTPLUG_CHG)
return DP_IRQ_TYPE_HP_CHANGE;
return DP_IRQ_TYPE_UNKNOWN;
return DP_IRQ_TYPE_UNKNOWN;
}
}
void exynos_dp_reset_aux(struct exynos_dp_device *dp)
@ -386,7 +401,7 @@ void exynos_dp_init_aux(struct exynos_dp_device *dp)
/* Disable AUX transaction H/W retry */
reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)|
AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL) ;
writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL);
/* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */
reg = DEFER_CTRL_EN | DEFER_COUNT(1);
@ -402,9 +417,14 @@ int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp)
{
u32 reg;
reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
if (reg & HPD_STATUS)
return 0;
if (gpio_is_valid(dp->hpd_gpio)) {
if (gpio_get_value(dp->hpd_gpio))
return 0;
} else {
reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
if (reg & HPD_STATUS)
return 0;
}
return -EINVAL;
}

View File

@ -19,21 +19,19 @@
#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
static LIST_HEAD(exynos_drm_manager_list);
static LIST_HEAD(exynos_drm_display_list);
static int exynos_drm_create_enc_conn(struct drm_device *dev,
int exynos_drm_create_enc_conn(struct drm_device *dev,
struct exynos_drm_display *display)
{
struct drm_encoder *encoder;
struct exynos_drm_manager *manager;
int ret;
unsigned long possible_crtcs = 0;
/* Find possible crtcs for this display */
list_for_each_entry(manager, &exynos_drm_manager_list, list)
if (manager->type == display->type)
possible_crtcs |= 1 << manager->pipe;
ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
if (ret < 0)
return ret;
possible_crtcs |= 1 << ret;
/* create and initialize a encoder for this sub driver. */
encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
@ -57,196 +55,6 @@ err_destroy_encoder:
return ret;
}
static int exynos_drm_subdrv_probe(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
{
if (subdrv->probe) {
int ret;
subdrv->drm_dev = dev;
/*
* this probe callback would be called by sub driver
* after setting of all resources to this sub driver,
* such as clock, irq and register map are done or by load()
* of exynos drm driver.
*
* P.S. note that this driver is considered for modularization.
*/
ret = subdrv->probe(dev, subdrv->dev);
if (ret)
return ret;
}
return 0;
}
static void exynos_drm_subdrv_remove(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
{
if (subdrv->remove)
subdrv->remove(dev, subdrv->dev);
}
int exynos_drm_initialize_managers(struct drm_device *dev)
{
struct exynos_drm_manager *manager, *n;
int ret, pipe = 0;
list_for_each_entry(manager, &exynos_drm_manager_list, list) {
if (manager->ops->initialize) {
ret = manager->ops->initialize(manager, dev, pipe);
if (ret) {
DRM_ERROR("Mgr init [%d] failed with %d\n",
manager->type, ret);
goto err;
}
}
manager->drm_dev = dev;
manager->pipe = pipe++;
ret = exynos_drm_crtc_create(manager);
if (ret) {
DRM_ERROR("CRTC create [%d] failed with %d\n",
manager->type, ret);
goto err;
}
}
return 0;
err:
list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list) {
if (pipe-- > 0)
exynos_drm_manager_unregister(manager);
else
list_del(&manager->list);
}
return ret;
}
void exynos_drm_remove_managers(struct drm_device *dev)
{
struct exynos_drm_manager *manager, *n;
list_for_each_entry_safe(manager, n, &exynos_drm_manager_list, list)
exynos_drm_manager_unregister(manager);
}
int exynos_drm_initialize_displays(struct drm_device *dev)
{
struct exynos_drm_display *display, *n;
int ret, initialized = 0;
list_for_each_entry(display, &exynos_drm_display_list, list) {
if (display->ops->initialize) {
ret = display->ops->initialize(display, dev);
if (ret) {
DRM_ERROR("Display init [%d] failed with %d\n",
display->type, ret);
goto err;
}
}
initialized++;
ret = exynos_drm_create_enc_conn(dev, display);
if (ret) {
DRM_ERROR("Encoder create [%d] failed with %d\n",
display->type, ret);
goto err;
}
}
return 0;
err:
list_for_each_entry_safe(display, n, &exynos_drm_display_list, list) {
if (initialized-- > 0)
exynos_drm_display_unregister(display);
else
list_del(&display->list);
}
return ret;
}
void exynos_drm_remove_displays(struct drm_device *dev)
{
struct exynos_drm_display *display, *n;
list_for_each_entry_safe(display, n, &exynos_drm_display_list, list)
exynos_drm_display_unregister(display);
}
int exynos_drm_device_register(struct drm_device *dev)
{
struct exynos_drm_subdrv *subdrv, *n;
int err;
if (!dev)
return -EINVAL;
list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
err = exynos_drm_subdrv_probe(dev, subdrv);
if (err) {
DRM_DEBUG("exynos drm subdrv probe failed.\n");
list_del(&subdrv->list);
continue;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_register);
int exynos_drm_device_unregister(struct drm_device *dev)
{
struct exynos_drm_subdrv *subdrv;
if (!dev) {
WARN(1, "Unexpected drm device unregister!\n");
return -EINVAL;
}
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
exynos_drm_subdrv_remove(dev, subdrv);
}
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
int exynos_drm_manager_register(struct exynos_drm_manager *manager)
{
BUG_ON(!manager->ops);
list_add_tail(&manager->list, &exynos_drm_manager_list);
return 0;
}
int exynos_drm_manager_unregister(struct exynos_drm_manager *manager)
{
if (manager->ops->remove)
manager->ops->remove(manager);
list_del(&manager->list);
return 0;
}
int exynos_drm_display_register(struct exynos_drm_display *display)
{
BUG_ON(!display->ops);
list_add_tail(&display->list, &exynos_drm_display_list);
return 0;
}
int exynos_drm_display_unregister(struct exynos_drm_display *display)
{
if (display->ops->remove)
display->ops->remove(display);
list_del(&display->list);
return 0;
}
int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
{
if (!subdrv)
@ -269,6 +77,54 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
}
EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
int exynos_drm_device_subdrv_probe(struct drm_device *dev)
{
struct exynos_drm_subdrv *subdrv, *n;
int err;
if (!dev)
return -EINVAL;
list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
if (subdrv->probe) {
subdrv->drm_dev = dev;
/*
* this probe callback would be called by sub driver
* after setting of all resources to this sub driver,
* such as clock, irq and register map are done.
*/
err = subdrv->probe(dev, subdrv->dev);
if (err) {
DRM_DEBUG("exynos drm subdrv probe failed.\n");
list_del(&subdrv->list);
continue;
}
}
}
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
int exynos_drm_device_subdrv_remove(struct drm_device *dev)
{
struct exynos_drm_subdrv *subdrv;
if (!dev) {
WARN(1, "Unexpected drm device unregister!\n");
return -EINVAL;
}
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
if (subdrv->remove)
subdrv->remove(dev, subdrv->dev);
}
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
{
struct exynos_drm_subdrv *subdrv;

View File

@ -368,6 +368,7 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
return -ENOMEM;
}
manager->crtc = &exynos_crtc->drm_crtc;
crtc = &exynos_crtc->drm_crtc;
private->crtc[manager->pipe] = crtc;
@ -491,3 +492,19 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
manager->ops->wait_for_vblank(manager);
}
}
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
unsigned int out_type)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
struct exynos_drm_crtc *exynos_crtc;
exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->manager->type == out_type)
return exynos_crtc->manager->pipe;
}
return -EPERM;
}

View File

@ -32,4 +32,8 @@ void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
/* This function gets pipe value to crtc device matched with out_type. */
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
unsigned int out_type);
#endif

View File

@ -40,20 +40,10 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
/* panels supported only by boot-loader are always connected */
if (!ctx->panel_node)
return connector_status_connected;
if (!ctx->panel->connector)
drm_panel_attach(ctx->panel, &ctx->connector);
if (!ctx->panel) {
ctx->panel = of_drm_find_panel(ctx->panel_node);
if (ctx->panel)
drm_panel_attach(ctx->panel, &ctx->connector);
}
if (ctx->panel)
return connector_status_connected;
return connector_status_disconnected;
return connector_status_connected;
}
static void exynos_dpi_connector_destroy(struct drm_connector *connector)
@ -94,12 +84,6 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
return 0;
}
static int exynos_dpi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static struct drm_encoder *
exynos_dpi_best_encoder(struct drm_connector *connector)
{
@ -110,7 +94,6 @@ exynos_dpi_best_encoder(struct drm_connector *connector)
static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
.get_modes = exynos_dpi_get_modes,
.mode_valid = exynos_dpi_mode_valid,
.best_encoder = exynos_dpi_best_encoder,
};
@ -123,10 +106,7 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
ctx->encoder = encoder;
if (ctx->panel_node)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(encoder->dev, connector,
&exynos_dpi_connector_funcs,
@ -172,7 +152,7 @@ static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
break;
default:
break;
};
}
ctx->dpms_mode = mode;
}
@ -294,8 +274,10 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
return -ENOMEM;
ret = of_get_videomode(dn, vm, 0);
if (ret < 0)
if (ret < 0) {
devm_kfree(dev, vm);
return ret;
}
ctx->vm = vm;
@ -308,32 +290,58 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
return 0;
}
int exynos_dpi_probe(struct device *dev)
struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
{
struct exynos_dpi *ctx;
int ret;
ret = exynos_drm_component_add(dev,
EXYNOS_DEVICE_TYPE_CONNECTOR,
exynos_dpi_display.type);
if (ret)
return ERR_PTR(ret);
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
goto err_del_component;
ctx->dev = dev;
exynos_dpi_display.ctx = ctx;
ctx->dpms_mode = DRM_MODE_DPMS_OFF;
ret = exynos_dpi_parse_dt(ctx);
if (ret < 0)
return ret;
if (ret < 0) {
devm_kfree(dev, ctx);
goto err_del_component;
}
exynos_drm_display_register(&exynos_dpi_display);
if (ctx->panel_node) {
ctx->panel = of_drm_find_panel(ctx->panel_node);
if (!ctx->panel) {
exynos_drm_component_del(dev,
EXYNOS_DEVICE_TYPE_CONNECTOR);
return ERR_PTR(-EPROBE_DEFER);
}
}
return 0;
return &exynos_dpi_display;
err_del_component:
exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return NULL;
}
int exynos_dpi_remove(struct device *dev)
{
struct drm_encoder *encoder = exynos_dpi_display.encoder;
struct exynos_dpi *ctx = exynos_dpi_display.ctx;
exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
exynos_drm_display_unregister(&exynos_dpi_display);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&ctx->connector);
exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}

View File

@ -16,6 +16,7 @@
#include <drm/drm_crtc_helper.h>
#include <linux/anon_inodes.h>
#include <linux/component.h>
#include <drm/exynos_drm.h>
@ -40,9 +41,19 @@
#define VBLANK_OFF_DELAY 50000
/* platform device pointer for exynos drm device. */
static struct platform_device *exynos_drm_pdev;
static DEFINE_MUTEX(drm_component_lock);
static LIST_HEAD(drm_component_list);
struct component_dev {
struct list_head list;
struct device *crtc_dev;
struct device *conn_dev;
enum exynos_drm_output_type out_type;
unsigned int dev_type_flag;
};
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@ -73,38 +84,21 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_mode_config_init(dev);
ret = exynos_drm_initialize_managers(dev);
if (ret)
goto err_mode_config_cleanup;
for (nr = 0; nr < MAX_PLANE; nr++) {
struct drm_plane *plane;
unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
goto err_manager_cleanup;
goto err_mode_config_cleanup;
}
ret = exynos_drm_initialize_displays(dev);
if (ret)
goto err_manager_cleanup;
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(dev);
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
goto err_display_cleanup;
/*
* probe sub drivers such as display controller and hdmi driver,
* that were registered at probe() of platform driver
* to the sub driver and create encoder and connector for them.
*/
ret = exynos_drm_device_register(dev);
if (ret)
goto err_vblank;
goto err_mode_config_cleanup;
/* setup possible_clones. */
exynos_drm_encoder_setup(dev);
@ -113,17 +107,25 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
platform_set_drvdata(dev->platformdev, dev);
/* Try to bind all sub drivers. */
ret = component_bind_all(dev->dev, dev);
if (ret)
goto err_cleanup_vblank;
/* Probe non kms sub drivers and virtual display driver. */
ret = exynos_drm_device_subdrv_probe(dev);
if (ret)
goto err_unbind_all;
/* force connectors detection */
drm_helper_hpd_irq_event(dev);
return 0;
err_vblank:
err_unbind_all:
component_unbind_all(dev->dev, dev);
err_cleanup_vblank:
drm_vblank_cleanup(dev);
err_display_cleanup:
exynos_drm_remove_displays(dev);
err_manager_cleanup:
exynos_drm_remove_managers(dev);
err_mode_config_cleanup:
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
@ -135,17 +137,17 @@ err_free_private:
static int exynos_drm_unload(struct drm_device *dev)
{
exynos_drm_device_subdrv_remove(dev);
exynos_drm_fbdev_fini(dev);
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
exynos_drm_remove_displays(dev);
exynos_drm_remove_managers(dev);
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
component_unbind_all(dev->dev, dev);
dev->dev_private = NULL;
return 0;
@ -183,9 +185,9 @@ static int exynos_drm_resume(struct drm_device *dev)
if (connector->funcs->dpms)
connector->funcs->dpms(connector, connector->dpms);
}
drm_modeset_unlock_all(dev);
drm_helper_resume_force_mode(dev);
drm_modeset_unlock_all(dev);
return 0;
}
@ -323,8 +325,7 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
DRIVER_GEM | DRIVER_PRIME,
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.suspend = exynos_drm_suspend,
@ -355,27 +356,6 @@ static struct drm_driver exynos_drm_driver = {
.minor = DRIVER_MINOR,
};
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
int ret;
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
return drm_platform_init(&exynos_drm_driver, pdev);
}
static int exynos_drm_platform_remove(struct platform_device *pdev)
{
drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int exynos_drm_sys_suspend(struct device *dev)
{
@ -400,237 +380,409 @@ static int exynos_drm_sys_resume(struct device *dev)
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int exynos_drm_runtime_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
pm_message_t message;
if (pm_runtime_suspended(dev))
return 0;
message.event = PM_EVENT_SUSPEND;
return exynos_drm_suspend(drm_dev, message);
}
static int exynos_drm_runtime_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
if (!pm_runtime_suspended(dev))
return 0;
return exynos_drm_resume(drm_dev);
}
#endif
static const struct dev_pm_ops exynos_drm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
SET_RUNTIME_PM_OPS(exynos_drm_runtime_suspend,
exynos_drm_runtime_resume, NULL)
};
int exynos_drm_component_add(struct device *dev,
enum exynos_drm_device_type dev_type,
enum exynos_drm_output_type out_type)
{
struct component_dev *cdev;
if (dev_type != EXYNOS_DEVICE_TYPE_CRTC &&
dev_type != EXYNOS_DEVICE_TYPE_CONNECTOR) {
DRM_ERROR("invalid device type.\n");
return -EINVAL;
}
mutex_lock(&drm_component_lock);
/*
* Make sure to check if there is a component which has two device
* objects, for connector and for encoder/connector.
* It should make sure that crtc and encoder/connector drivers are
* ready before exynos drm core binds them.
*/
list_for_each_entry(cdev, &drm_component_list, list) {
if (cdev->out_type == out_type) {
/*
* If crtc and encoder/connector device objects are
* added already just return.
*/
if (cdev->dev_type_flag == (EXYNOS_DEVICE_TYPE_CRTC |
EXYNOS_DEVICE_TYPE_CONNECTOR)) {
mutex_unlock(&drm_component_lock);
return 0;
}
if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
cdev->crtc_dev = dev;
cdev->dev_type_flag |= dev_type;
}
if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
cdev->conn_dev = dev;
cdev->dev_type_flag |= dev_type;
}
mutex_unlock(&drm_component_lock);
return 0;
}
}
mutex_unlock(&drm_component_lock);
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return -ENOMEM;
if (dev_type == EXYNOS_DEVICE_TYPE_CRTC)
cdev->crtc_dev = dev;
if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR)
cdev->conn_dev = dev;
cdev->out_type = out_type;
cdev->dev_type_flag = dev_type;
mutex_lock(&drm_component_lock);
list_add_tail(&cdev->list, &drm_component_list);
mutex_unlock(&drm_component_lock);
return 0;
}
void exynos_drm_component_del(struct device *dev,
enum exynos_drm_device_type dev_type)
{
struct component_dev *cdev, *next;
mutex_lock(&drm_component_lock);
list_for_each_entry_safe(cdev, next, &drm_component_list, list) {
if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
if (cdev->crtc_dev == dev) {
cdev->crtc_dev = NULL;
cdev->dev_type_flag &= ~dev_type;
}
}
if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
if (cdev->conn_dev == dev) {
cdev->conn_dev = NULL;
cdev->dev_type_flag &= ~dev_type;
}
}
/*
* Release cdev object only in case that both of crtc and
* encoder/connector device objects are NULL.
*/
if (!cdev->crtc_dev && !cdev->conn_dev) {
list_del(&cdev->list);
kfree(cdev);
}
break;
}
mutex_unlock(&drm_component_lock);
}
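A sketch of how a display pipeline pairs up under this scheme (hypothetical call order; the CRTC-side and connector-side drivers register with the same output type so the core can bind them together):
	/* in the crtc-side driver's probe, e.g. a FIMD-like device */
	exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
				 EXYNOS_DISPLAY_TYPE_LCD);

	/* in the connector-side driver's probe, e.g. a DPI/DSI-like device */
	exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
				 EXYNOS_DISPLAY_TYPE_LCD);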
static int compare_of(struct device *dev, void *data)
{
return dev == (struct device *)data;
}
static int exynos_drm_add_components(struct device *dev, struct master *m)
{
struct component_dev *cdev;
unsigned int attach_cnt = 0;
mutex_lock(&drm_component_lock);
list_for_each_entry(cdev, &drm_component_list, list) {
int ret;
/*
* Add components to master only in case that crtc and
* encoder/connector device objects exist.
*/
if (!cdev->crtc_dev || !cdev->conn_dev)
continue;
attach_cnt++;
mutex_unlock(&drm_component_lock);
/*
* fimd and dpi modules have same device object so add
* only crtc device object in this case.
*
* TODO. if dpi module follows driver-model driver then
* below codes can be removed.
*/
if (cdev->crtc_dev == cdev->conn_dev) {
ret = component_master_add_child(m, compare_of,
cdev->crtc_dev);
if (ret < 0)
return ret;
goto out_lock;
}
/*
* Do not change the call order below.
* crtc device first should be added to master because
* connector/encoder need pipe number of crtc when they
* are created.
*/
ret = component_master_add_child(m, compare_of, cdev->crtc_dev);
ret |= component_master_add_child(m, compare_of,
cdev->conn_dev);
if (ret < 0)
return ret;
out_lock:
mutex_lock(&drm_component_lock);
}
mutex_unlock(&drm_component_lock);
return attach_cnt ? 0 : -ENODEV;
}
static int exynos_drm_bind(struct device *dev)
{
return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
}
static void exynos_drm_unbind(struct device *dev)
{
drm_put_dev(dev_get_drvdata(dev));
}
static const struct component_master_ops exynos_drm_ops = {
.add_components = exynos_drm_add_components,
.bind = exynos_drm_bind,
.unbind = exynos_drm_unbind,
};
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
int ret;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
#ifdef CONFIG_DRM_EXYNOS_FIMD
ret = platform_driver_register(&fimd_driver);
if (ret < 0)
return ret;
#endif
#ifdef CONFIG_DRM_EXYNOS_DP
ret = platform_driver_register(&dp_driver);
if (ret < 0)
goto err_unregister_fimd_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
ret = platform_driver_register(&dsi_driver);
if (ret < 0)
goto err_unregister_dp_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
ret = platform_driver_register(&mixer_driver);
if (ret < 0)
goto err_unregister_dsi_drv;
ret = platform_driver_register(&hdmi_driver);
if (ret < 0)
goto err_unregister_mixer_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
ret = platform_driver_register(&g2d_driver);
if (ret < 0)
goto err_unregister_hdmi_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
ret = platform_driver_register(&fimc_driver);
if (ret < 0)
goto err_unregister_g2d_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
ret = platform_driver_register(&rotator_driver);
if (ret < 0)
goto err_unregister_fimc_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
ret = platform_driver_register(&gsc_driver);
if (ret < 0)
goto err_unregister_rotator_drv;
#endif
#ifdef CONFIG_DRM_EXYNOS_IPP
ret = platform_driver_register(&ipp_driver);
if (ret < 0)
goto err_unregister_gsc_drv;
ret = exynos_platform_device_ipp_register();
if (ret < 0)
goto err_unregister_ipp_drv;
#endif
ret = component_master_add(&pdev->dev, &exynos_drm_ops);
if (ret < 0)
DRM_DEBUG_KMS("re-tried by last sub driver probed later.\n");
return 0;
#ifdef CONFIG_DRM_EXYNOS_IPP
err_unregister_ipp_drv:
platform_driver_unregister(&ipp_driver);
err_unregister_gsc_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
platform_driver_unregister(&gsc_driver);
err_unregister_rotator_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
platform_driver_unregister(&rotator_driver);
err_unregister_fimc_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
platform_driver_unregister(&fimc_driver);
err_unregister_g2d_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
err_unregister_hdmi_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&hdmi_driver);
err_unregister_mixer_drv:
platform_driver_unregister(&mixer_driver);
err_unregister_dsi_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
platform_driver_unregister(&dsi_driver);
err_unregister_dp_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_DP
platform_driver_unregister(&dp_driver);
err_unregister_fimd_drv:
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMD
platform_driver_unregister(&fimd_driver);
#endif
return ret;
}
static int exynos_drm_platform_remove(struct platform_device *pdev)
{
#ifdef CONFIG_DRM_EXYNOS_IPP
exynos_platform_device_ipp_unregister();
platform_driver_unregister(&ipp_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
platform_driver_unregister(&gsc_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
platform_driver_unregister(&rotator_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
platform_driver_unregister(&fimc_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMD
platform_driver_unregister(&fimd_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
platform_driver_unregister(&dsi_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_DP
platform_driver_unregister(&dp_driver);
#endif
component_master_del(&pdev->dev, &exynos_drm_ops);
return 0;
}
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
.remove = exynos_drm_platform_remove,
.driver = {
.probe = exynos_drm_platform_probe,
.remove = exynos_drm_platform_remove,
.driver = {
.owner = THIS_MODULE,
.name = "exynos-drm",
.pm = &exynos_drm_pm_ops,
},
};
static int __init exynos_drm_init(void)
static int exynos_drm_init(void)
{
int ret;
#ifdef CONFIG_DRM_EXYNOS_DP
ret = platform_driver_register(&dp_driver);
if (ret < 0)
goto out_dp;
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
ret = platform_driver_register(&dsi_driver);
if (ret < 0)
goto out_dsi;
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMD
ret = platform_driver_register(&fimd_driver);
if (ret < 0)
goto out_fimd;
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
ret = platform_driver_register(&hdmi_driver);
if (ret < 0)
goto out_hdmi;
ret = platform_driver_register(&mixer_driver);
if (ret < 0)
goto out_mixer;
#endif
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0);
if (IS_ERR(exynos_drm_pdev))
return PTR_ERR(exynos_drm_pdev);
#ifdef CONFIG_DRM_EXYNOS_VIDI
ret = platform_driver_register(&vidi_driver);
ret = exynos_drm_probe_vidi();
if (ret < 0)
goto out_vidi;
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
ret = platform_driver_register(&g2d_driver);
if (ret < 0)
goto out_g2d;
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
ret = platform_driver_register(&fimc_driver);
if (ret < 0)
goto out_fimc;
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
ret = platform_driver_register(&rotator_driver);
if (ret < 0)
goto out_rotator;
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
ret = platform_driver_register(&gsc_driver);
if (ret < 0)
goto out_gsc;
#endif
#ifdef CONFIG_DRM_EXYNOS_IPP
ret = platform_driver_register(&ipp_driver);
if (ret < 0)
goto out_ipp;
ret = exynos_platform_device_ipp_register();
if (ret < 0)
goto out_ipp_dev;
goto err_unregister_pd;
#endif
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
goto out_drm;
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0);
if (IS_ERR(exynos_drm_pdev)) {
ret = PTR_ERR(exynos_drm_pdev);
goto out;
}
if (ret)
goto err_remove_vidi;
return 0;
out:
platform_driver_unregister(&exynos_drm_platform_driver);
out_drm:
#ifdef CONFIG_DRM_EXYNOS_IPP
exynos_platform_device_ipp_unregister();
out_ipp_dev:
platform_driver_unregister(&ipp_driver);
out_ipp:
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
platform_driver_unregister(&gsc_driver);
out_gsc:
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
platform_driver_unregister(&rotator_driver);
out_rotator:
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
platform_driver_unregister(&fimc_driver);
out_fimc:
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
out_g2d:
#endif
err_unregister_pd:
platform_device_unregister(exynos_drm_pdev);
err_remove_vidi:
#ifdef CONFIG_DRM_EXYNOS_VIDI
platform_driver_unregister(&vidi_driver);
out_vidi:
exynos_drm_remove_vidi();
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&mixer_driver);
out_mixer:
platform_driver_unregister(&hdmi_driver);
out_hdmi:
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMD
platform_driver_unregister(&fimd_driver);
out_fimd:
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
platform_driver_unregister(&dsi_driver);
out_dsi:
#endif
#ifdef CONFIG_DRM_EXYNOS_DP
platform_driver_unregister(&dp_driver);
out_dp:
#endif
return ret;
}
static void __exit exynos_drm_exit(void)
static void exynos_drm_exit(void)
{
platform_device_unregister(exynos_drm_pdev);
platform_driver_unregister(&exynos_drm_platform_driver);
#ifdef CONFIG_DRM_EXYNOS_IPP
exynos_platform_device_ipp_unregister();
platform_driver_unregister(&ipp_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
platform_driver_unregister(&gsc_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
platform_driver_unregister(&rotator_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
platform_driver_unregister(&fimc_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
platform_driver_unregister(&vidi_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMD
platform_driver_unregister(&fimd_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
platform_driver_unregister(&dsi_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_DP
platform_driver_unregister(&dp_driver);
exynos_drm_remove_vidi();
#endif
platform_device_unregister(exynos_drm_pdev);
platform_driver_unregister(&exynos_drm_platform_driver);
}
module_init(exynos_drm_init);

View File

@ -42,6 +42,13 @@ struct drm_connector;
extern unsigned int drm_vblank_offdelay;
/* This enumerates device type. */
enum exynos_drm_device_type {
EXYNOS_DEVICE_TYPE_NONE,
EXYNOS_DEVICE_TYPE_CRTC,
EXYNOS_DEVICE_TYPE_CONNECTOR,
};
/* this enumerates display type. */
enum exynos_drm_output_type {
EXYNOS_DISPLAY_TYPE_NONE,
@ -122,7 +129,6 @@ struct exynos_drm_overlay {
* Exynos DRM Display Structure.
* - this structure is common to analog tv, digital tv and lcd panel.
*
* @initialize: initializes the display with drm_dev
* @remove: cleans up the display for removal
* @mode_fixup: fix mode data comparing to hw specific display mode.
* @mode_set: convert drm_display_mode to hw specific display mode and
@ -133,8 +139,6 @@ struct exynos_drm_overlay {
*/
struct exynos_drm_display;
struct exynos_drm_display_ops {
int (*initialize)(struct exynos_drm_display *display,
struct drm_device *drm_dev);
int (*create_connector)(struct exynos_drm_display *display,
struct drm_encoder *encoder);
void (*remove)(struct exynos_drm_display *display);
@ -172,8 +176,6 @@ struct exynos_drm_display {
/*
* Exynos drm manager ops
*
* @initialize: initializes the manager with drm_dev
* @remove: cleans up the manager for removal
* @dpms: control device power.
* @mode_fixup: fix mode data before applying it
* @mode_set: set the given mode to the manager
@ -189,9 +191,6 @@ struct exynos_drm_display {
*/
struct exynos_drm_manager;
struct exynos_drm_manager_ops {
int (*initialize)(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev, int pipe);
void (*remove)(struct exynos_drm_manager *mgr);
void (*dpms)(struct exynos_drm_manager *mgr, int mode);
bool (*mode_fixup)(struct exynos_drm_manager *mgr,
const struct drm_display_mode *mode,
@ -215,6 +214,7 @@ struct exynos_drm_manager_ops {
* @list: the list entry for this manager
* @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
* @drm_dev: pointer to the drm device
* @crtc: crtc object.
* @pipe: the pipe number for this crtc/manager
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the manager's implementation specific context
@ -223,6 +223,7 @@ struct exynos_drm_manager {
struct list_head list;
enum exynos_drm_output_type type;
struct drm_device *drm_dev;
struct drm_crtc *crtc;
int pipe;
struct exynos_drm_manager_ops *ops;
void *ctx;
@ -254,6 +255,7 @@ struct drm_exynos_file_private {
* otherwise default one.
* @da_space_size: size of device address space.
* if 0 then default value is used for it.
* @pipe: the pipe number for this crtc/manager.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@ -271,6 +273,8 @@ struct exynos_drm_private {
unsigned long da_start;
unsigned long da_space_size;
unsigned int pipe;
};
/*
@ -281,11 +285,11 @@ struct exynos_drm_private {
* @drm_dev: pointer to drm_device and this pointer would be set
* when sub driver calls exynos_drm_subdrv_register().
* @manager: subdrv has its own manager to control a hardware appropriately
* and we can access a hardware drawing on this manager.
* and we can access a hardware drawing on this manager.
* @probe: this callback would be called by exynos drm driver after
* subdrv is registered to it.
* subdrv is registered to it.
* @remove: this callback is used to release resources created
* by probe callback.
* by probe callback.
* @open: this would be called with drm device file open.
* @close: this would be called with drm device file close.
*/
@ -302,39 +306,14 @@ struct exynos_drm_subdrv {
struct drm_file *file);
};
/*
* this function calls a probe callback registered to sub driver list and
* create its own encoder and connector and then set drm_device object
* to global one.
*/
int exynos_drm_device_register(struct drm_device *dev);
/*
* this function calls a remove callback registered to sub driver list and
* destroy its own encoder and connetor.
*/
int exynos_drm_device_unregister(struct drm_device *dev);
int exynos_drm_initialize_managers(struct drm_device *dev);
void exynos_drm_remove_managers(struct drm_device *dev);
int exynos_drm_initialize_displays(struct drm_device *dev);
void exynos_drm_remove_displays(struct drm_device *dev);
int exynos_drm_manager_register(struct exynos_drm_manager *manager);
int exynos_drm_manager_unregister(struct exynos_drm_manager *manager);
int exynos_drm_display_register(struct exynos_drm_display *display);
int exynos_drm_display_unregister(struct exynos_drm_display *display);
/*
* this function would be called by sub drivers such as display controller
* or hdmi driver to register this sub driver object to exynos drm driver
* and when a sub driver is registered to exynos drm driver a probe callback
* of the sub driver is called and creates its own encoder and connector.
*/
/* This function would be called by non kms drivers such as g2d and ipp. */
int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
/* this function removes subdrv list from exynos drm driver */
int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
int exynos_drm_device_subdrv_probe(struct drm_device *dev);
int exynos_drm_device_subdrv_remove(struct drm_device *dev);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
@ -360,18 +339,40 @@ int exynos_platform_device_ipp_register(void);
void exynos_platform_device_ipp_unregister(void);
#ifdef CONFIG_DRM_EXYNOS_DPI
int exynos_dpi_probe(struct device *dev);
struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct device *dev);
#else
static inline int exynos_dpi_probe(struct device *dev) { return 0; }
static inline struct exynos_drm_display *
exynos_dpi_probe(struct device *dev) { return 0; }
static inline int exynos_dpi_remove(struct device *dev) { return 0; }
#endif
/*
* this function registers exynos drm vidi platform device/driver.
*/
int exynos_drm_probe_vidi(void);
/*
* this function unregisters the exynos drm vidi platform device/driver.
*/
void exynos_drm_remove_vidi(void);
/* This function creates an encoder and a connector, and initializes them. */
int exynos_drm_create_enc_conn(struct drm_device *dev,
struct exynos_drm_display *display);
int exynos_drm_component_add(struct device *dev,
enum exynos_drm_device_type dev_type,
enum exynos_drm_output_type out_type);
void exynos_drm_component_del(struct device *dev,
enum exynos_drm_device_type dev_type);
extern struct platform_driver fimd_driver;
extern struct platform_driver dp_driver;
extern struct platform_driver dsi_driver;
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;

View File

@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
@ -1378,16 +1379,60 @@ end:
return ret;
}
static int exynos_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm_dev = data;
struct exynos_dsi *dsi;
int ret;
ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display);
if (ret) {
DRM_ERROR("Encoder create [%d] failed with %d\n",
exynos_dsi_display.type, ret);
return ret;
}
dsi = exynos_dsi_display.ctx;
return mipi_dsi_host_register(&dsi->dsi_host);
}
static void exynos_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct exynos_dsi *dsi = exynos_dsi_display.ctx;
struct drm_encoder *encoder = dsi->encoder;
exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
mipi_dsi_host_unregister(&dsi->dsi_host);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&dsi->connector);
}
static const struct component_ops exynos_dsi_component_ops = {
.bind = exynos_dsi_bind,
.unbind = exynos_dsi_unbind,
};
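The driver now hooks into the generic component framework: probe only registers component_ops, and all DRM-facing setup moves into ->bind, which runs once the master device binds every component. A bare-bones sketch of that pattern for a hypothetical "foo" platform device — it only mirrors the calls visible in this diff and is not the actual exynos code:

#include <linux/component.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_bind(struct device *dev, struct device *master, void *data)
{
	/* data is the drm_device handed down by the master's bind */
	dev_info(dev, "foo bound to master %s\n", dev_name(master));
	return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
	dev_info(dev, "foo unbound\n");
}

static const struct component_ops foo_component_ops = {
	.bind	= foo_bind,
	.unbind	= foo_unbind,
};

static int foo_probe(struct platform_device *pdev)
{
	/* defer all drm-facing setup to ->bind, run when the master binds */
	return component_add(&pdev->dev, &foo_component_ops);
}

static int foo_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &foo_component_ops);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= { .name = "foo-component" },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");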
static int exynos_dsi_probe(struct platform_device *pdev)
{
struct resource *res;
struct exynos_dsi *dsi;
int ret;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
exynos_dsi_display.type);
if (ret)
return ret;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi) {
dev_err(&pdev->dev, "failed to allocate dsi object.\n");
return -ENOMEM;
ret = -ENOMEM;
goto err_del_component;
}
init_completion(&dsi->completed);
@ -1401,7 +1446,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = exynos_dsi_parse_dt(dsi);
if (ret)
return ret;
goto err_del_component;
dsi->supplies[0].supply = "vddcore";
dsi->supplies[1].supply = "vddio";
@ -1415,32 +1460,37 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
if (IS_ERR(dsi->pll_clk)) {
dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
return -EPROBE_DEFER;
ret = PTR_ERR(dsi->pll_clk);
goto err_del_component;
}
dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(dsi->bus_clk)) {
dev_info(&pdev->dev, "failed to get dsi bus clock\n");
return -EPROBE_DEFER;
ret = PTR_ERR(dsi->bus_clk);
goto err_del_component;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dsi->reg_base)) {
dev_err(&pdev->dev, "failed to remap io region\n");
return PTR_ERR(dsi->reg_base);
ret = PTR_ERR(dsi->reg_base);
goto err_del_component;
}
dsi->phy = devm_phy_get(&pdev->dev, "dsim");
if (IS_ERR(dsi->phy)) {
dev_info(&pdev->dev, "failed to get dsim phy\n");
return -EPROBE_DEFER;
ret = PTR_ERR(dsi->phy);
goto err_del_component;
}
dsi->irq = platform_get_irq(pdev, 0);
if (dsi->irq < 0) {
dev_err(&pdev->dev, "failed to request dsi irq resource\n");
return dsi->irq;
ret = dsi->irq;
goto err_del_component;
}
irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
@ -1449,59 +1499,32 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request dsi irq\n");
return ret;
goto err_del_component;
}
exynos_dsi_display.ctx = dsi;
platform_set_drvdata(pdev, &exynos_dsi_display);
exynos_drm_display_register(&exynos_dsi_display);
return mipi_dsi_host_register(&dsi->dsi_host);
ret = component_add(&pdev->dev, &exynos_dsi_component_ops);
if (ret)
goto err_del_component;
return ret;
err_del_component:
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return ret;
}
static int exynos_dsi_remove(struct platform_device *pdev)
{
struct exynos_dsi *dsi = exynos_dsi_display.ctx;
exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
exynos_drm_display_unregister(&exynos_dsi_display);
mipi_dsi_host_unregister(&dsi->dsi_host);
component_del(&pdev->dev, &exynos_dsi_component_ops);
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}
#if CONFIG_PM_SLEEP
static int exynos_dsi_resume(struct device *dev)
{
struct exynos_dsi *dsi = exynos_dsi_display.ctx;
if (dsi->state & DSIM_STATE_ENABLED) {
dsi->state &= ~DSIM_STATE_ENABLED;
exynos_dsi_enable(dsi);
}
return 0;
}
static int exynos_dsi_suspend(struct device *dev)
{
struct exynos_dsi *dsi = exynos_dsi_display.ctx;
if (dsi->state & DSIM_STATE_ENABLED) {
exynos_dsi_disable(dsi);
dsi->state |= DSIM_STATE_ENABLED;
}
return 0;
}
#endif
static const struct dev_pm_ops exynos_dsi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume)
};
static struct of_device_id exynos_dsi_of_match[] = {
{ .compatible = "samsung,exynos4210-mipi-dsi" },
{ }
@ -1513,7 +1536,6 @@ struct platform_driver dsi_driver = {
.driver = {
.name = "exynos-dsi",
.owner = THIS_MODULE,
.pm = &exynos_dsi_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
};

View File

@ -121,16 +121,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitches[0];
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
if (is_drm_iommu_supported(dev))
fbi->fix.smem_start = (unsigned long)
(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
else
fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
fbi->screen_size = size;
fbi->fix.smem_len = size;
return 0;
}
@ -237,7 +229,7 @@ static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
{
struct drm_connector *connector;
bool ret = false;
@ -375,7 +367,5 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
if (!private || !private->fb_helper)
return;
drm_modeset_lock_all(dev);
drm_fb_helper_restore_fbdev_mode(private->fb_helper);
drm_modeset_unlock_all(dev);
drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}

View File

@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@ -57,7 +58,6 @@
#define FIMC_SHFACTOR 10
#define FIMC_BUF_STOP 1
#define FIMC_BUF_START 2
#define FIMC_REG_SZ 32
#define FIMC_WIDTH_ITU_709 1280
#define FIMC_REFRESH_MAX 60
#define FIMC_REFRESH_MIN 12
@ -69,9 +69,6 @@
#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
struct fimc_context, ippdrv);
#define fimc_read(offset) readl(ctx->regs + (offset))
#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
enum fimc_wb {
FIMC_WB_NONE,
FIMC_WB_A,
@ -161,7 +158,7 @@ struct fimc_context {
struct exynos_drm_ippdrv ippdrv;
struct resource *regs_res;
void __iomem *regs;
struct mutex lock;
spinlock_t lock;
struct clk *clocks[FIMC_CLKS_MAX];
u32 clk_frequency;
struct regmap *sysreg;
@ -172,39 +169,53 @@ struct fimc_context {
bool suspended;
};
static u32 fimc_read(struct fimc_context *ctx, u32 reg)
{
return readl(ctx->regs + reg);
}
static void fimc_write(struct fimc_context *ctx, u32 val, u32 reg)
{
writel(val, ctx->regs + reg);
}
static void fimc_set_bits(struct fimc_context *ctx, u32 reg, u32 bits)
{
void __iomem *r = ctx->regs + reg;
writel(readl(r) | bits, r);
}
static void fimc_clear_bits(struct fimc_context *ctx, u32 reg, u32 bits)
{
void __iomem *r = ctx->regs + reg;
writel(readl(r) & ~bits, r);
}
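These helpers replace the old fimc_read()/fimc_write() macros that captured ctx implicitly; every access now names the context, and the set/clear-bits variants wrap the usual read-modify-write on the memory-mapped registers. A minimal userspace sketch of the same pattern, with a plain array standing in for the MMIO region (the mock_* names are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Mock "register file": one 32-bit register per 4-byte offset. */
struct mock_ctx {
	uint32_t regs[16];
};

static uint32_t mock_read(struct mock_ctx *ctx, uint32_t reg)
{
	return ctx->regs[reg / 4];
}

/* same argument order as fimc_write(ctx, val, reg) above */
static void mock_write(struct mock_ctx *ctx, uint32_t val, uint32_t reg)
{
	ctx->regs[reg / 4] = val;
}

static void mock_set_bits(struct mock_ctx *ctx, uint32_t reg, uint32_t bits)
{
	mock_write(ctx, mock_read(ctx, reg) | bits, reg);
}

static void mock_clear_bits(struct mock_ctx *ctx, uint32_t reg, uint32_t bits)
{
	mock_write(ctx, mock_read(ctx, reg) & ~bits, reg);
}

int main(void)
{
	struct mock_ctx ctx = { .regs = { 0 } };

	mock_set_bits(&ctx, 0x08, 1u << 31);   /* e.g. assert a SWRST-style bit */
	mock_clear_bits(&ctx, 0x08, 1u << 31); /* and release it again */
	printf("reg 0x08 = 0x%08x\n", mock_read(&ctx, 0x08));
	return 0;
}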
static void fimc_sw_reset(struct fimc_context *ctx)
{
u32 cfg;
/* stop dma operation */
cfg = fimc_read(EXYNOS_CISTATUS);
if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
cfg = fimc_read(EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_ENVID;
fimc_write(cfg, EXYNOS_MSCTRL);
}
cfg = fimc_read(ctx, EXYNOS_CISTATUS);
if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg))
fimc_clear_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
cfg = fimc_read(EXYNOS_CISRCFMT);
cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
fimc_write(cfg, EXYNOS_CISRCFMT);
fimc_set_bits(ctx, EXYNOS_CISRCFMT, EXYNOS_CISRCFMT_ITU601_8BIT);
/* disable image capture */
cfg = fimc_read(EXYNOS_CIIMGCPT);
cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
fimc_write(cfg, EXYNOS_CIIMGCPT);
fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
/* s/w reset */
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg |= (EXYNOS_CIGCTRL_SWRST);
fimc_write(cfg, EXYNOS_CIGCTRL);
fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
/* s/w reset complete */
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg &= ~EXYNOS_CIGCTRL_SWRST;
fimc_write(cfg, EXYNOS_CIGCTRL);
fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST);
/* reset sequence */
fimc_write(0x0, EXYNOS_CIFCNTSEQ);
fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
}
static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
@ -220,7 +231,7 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
DRM_DEBUG_KMS("wb[%d]\n", wb);
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
@ -246,7 +257,7 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
break;
}
fimc_write(cfg, EXYNOS_CIGCTRL);
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_set_polarity(struct fimc_context *ctx,
@ -259,7 +270,7 @@ static void fimc_set_polarity(struct fimc_context *ctx,
DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n",
pol->inv_href, pol->inv_hsync);
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
@ -272,7 +283,7 @@ static void fimc_set_polarity(struct fimc_context *ctx,
if (pol->inv_hsync)
cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
fimc_write(cfg, EXYNOS_CIGCTRL);
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
@ -281,70 +292,54 @@ static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
DRM_DEBUG_KMS("enable[%d]\n", enable);
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable)
cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
else
cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
fimc_write(cfg, EXYNOS_CIGCTRL);
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
bool overflow, bool level)
static void fimc_mask_irq(struct fimc_context *ctx, bool enable)
{
u32 cfg;
DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n",
enable, overflow, level);
DRM_DEBUG_KMS("enable[%d]\n", enable);
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable) {
cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
if (overflow)
cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
if (level)
cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
cfg &= ~EXYNOS_CIGCTRL_IRQ_OVFEN;
cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE | EXYNOS_CIGCTRL_IRQ_LEVEL;
} else
cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
fimc_write(cfg, EXYNOS_CIGCTRL);
cfg &= ~EXYNOS_CIGCTRL_IRQ_ENABLE;
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
static void fimc_clear_irq(struct fimc_context *ctx)
{
u32 cfg;
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
fimc_write(cfg, EXYNOS_CIGCTRL);
fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_CLR);
}
static bool fimc_check_ovf(struct fimc_context *ctx)
{
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
u32 cfg, status, flag;
u32 status, flag;
status = fimc_read(EXYNOS_CISTATUS);
status = fimc_read(ctx, EXYNOS_CISTATUS);
flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
EXYNOS_CISTATUS_OVFICR;
DRM_DEBUG_KMS("flag[0x%x]\n", flag);
if (status & flag) {
cfg = fimc_read(EXYNOS_CIWDOFST);
cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
fimc_set_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
fimc_write(cfg, EXYNOS_CIWDOFST);
cfg = fimc_read(EXYNOS_CIWDOFST);
cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
fimc_write(cfg, EXYNOS_CIWDOFST);
dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
ctx->id, status);
return true;
@ -357,7 +352,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
{
u32 cfg;
cfg = fimc_read(EXYNOS_CISTATUS);
cfg = fimc_read(ctx, EXYNOS_CISTATUS);
DRM_DEBUG_KMS("cfg[0x%x]\n", cfg);
@ -365,7 +360,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
return false;
cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
fimc_write(cfg, EXYNOS_CISTATUS);
fimc_write(ctx, cfg, EXYNOS_CISTATUS);
return true;
}
@ -375,7 +370,7 @@ static int fimc_get_buf_id(struct fimc_context *ctx)
u32 cfg;
int frame_cnt, buf_id;
cfg = fimc_read(EXYNOS_CISTATUS2);
cfg = fimc_read(ctx, EXYNOS_CISTATUS2);
frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
if (frame_cnt == 0)
@ -402,13 +397,13 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
DRM_DEBUG_KMS("enable[%d]\n", enable);
cfg = fimc_read(EXYNOS_CIOCTRL);
cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
if (enable)
cfg |= EXYNOS_CIOCTRL_LASTENDEN;
else
cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
fimc_write(cfg, EXYNOS_CIOCTRL);
fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
}
@ -420,18 +415,18 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(EXYNOS_CISCCTRL);
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
switch (fmt) {
case DRM_FORMAT_RGB565:
cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
fimc_write(cfg, EXYNOS_CISCCTRL);
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return 0;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
fimc_write(cfg, EXYNOS_CISCCTRL);
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return 0;
default:
/* bypass */
@ -439,7 +434,7 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
}
/* YUV */
cfg = fimc_read(EXYNOS_MSCTRL);
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
EXYNOS_MSCTRL_C_INT_IN_2PLANE |
EXYNOS_MSCTRL_ORDER422_YCBYCR);
@ -479,7 +474,7 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
return -EINVAL;
}
fimc_write(cfg, EXYNOS_MSCTRL);
fimc_write(ctx, cfg, EXYNOS_MSCTRL);
return 0;
}
@ -492,7 +487,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
cfg = fimc_read(EXYNOS_MSCTRL);
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
switch (fmt) {
@ -527,9 +522,9 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
return -EINVAL;
}
fimc_write(cfg, EXYNOS_MSCTRL);
fimc_write(ctx, cfg, EXYNOS_MSCTRL);
cfg = fimc_read(EXYNOS_CIDMAPARAM);
cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
if (fmt == DRM_FORMAT_NV12MT)
@ -537,7 +532,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
else
cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
fimc_write(cfg, EXYNOS_CIDMAPARAM);
fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
return fimc_src_set_fmt_order(ctx, fmt);
}
@ -552,11 +547,11 @@ static int fimc_src_set_transf(struct device *dev,
DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
cfg1 = fimc_read(EXYNOS_MSCTRL);
cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
EXYNOS_MSCTRL_FLIP_Y_MIRROR);
cfg2 = fimc_read(EXYNOS_CITRGFMT);
cfg2 = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
switch (degree) {
@ -595,8 +590,8 @@ static int fimc_src_set_transf(struct device *dev,
return -EINVAL;
}
fimc_write(cfg1, EXYNOS_MSCTRL);
fimc_write(cfg2, EXYNOS_CITRGFMT);
fimc_write(ctx, cfg1, EXYNOS_MSCTRL);
fimc_write(ctx, cfg2, EXYNOS_CITRGFMT);
*swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
return 0;
@ -621,17 +616,17 @@ static int fimc_set_window(struct fimc_context *ctx,
* set window offset 1, 2 size
* check figure 43-21 in user manual
*/
cfg = fimc_read(EXYNOS_CIWDOFST);
cfg = fimc_read(ctx, EXYNOS_CIWDOFST);
cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
EXYNOS_CIWDOFST_WINVEROFST_MASK);
cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
EXYNOS_CIWDOFST_WINVEROFST(v1));
cfg |= EXYNOS_CIWDOFST_WINOFSEN;
fimc_write(cfg, EXYNOS_CIWDOFST);
fimc_write(ctx, cfg, EXYNOS_CIWDOFST);
cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
EXYNOS_CIWDOFST2_WINVEROFST2(v2));
fimc_write(cfg, EXYNOS_CIWDOFST2);
fimc_write(ctx, cfg, EXYNOS_CIWDOFST2);
return 0;
}
@ -651,7 +646,7 @@ static int fimc_src_set_size(struct device *dev, int swap,
cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
fimc_write(cfg, EXYNOS_ORGISIZE);
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
@ -663,12 +658,12 @@ static int fimc_src_set_size(struct device *dev, int swap,
}
/* set input DMA image size */
cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE);
/*
* set input FIFO image size
@ -677,18 +672,18 @@ static int fimc_src_set_size(struct device *dev, int swap,
cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
fimc_write(cfg, EXYNOS_CISRCFMT);
fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
/* offset Y(RGB), Cb, Cr */
cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
fimc_write(cfg, EXYNOS_CIIYOFF);
fimc_write(ctx, cfg, EXYNOS_CIIYOFF);
cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
fimc_write(cfg, EXYNOS_CIICBOFF);
fimc_write(ctx, cfg, EXYNOS_CIICBOFF);
cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIICROFF_VERTICAL(img_pos.y));
fimc_write(cfg, EXYNOS_CIICROFF);
fimc_write(ctx, cfg, EXYNOS_CIICROFF);
return fimc_set_window(ctx, &img_pos, &img_sz);
}
@ -722,25 +717,25 @@ static int fimc_src_set_addr(struct device *dev,
switch (buf_type) {
case IPP_BUF_ENQUEUE:
config = &property->config[EXYNOS_DRM_OPS_SRC];
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
EXYNOS_CIIYSA(buf_id));
if (config->fmt == DRM_FORMAT_YVU420) {
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIICBSA(buf_id));
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIICRSA(buf_id));
} else {
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIICBSA(buf_id));
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIICRSA(buf_id));
}
break;
case IPP_BUF_DEQUEUE:
fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id));
break;
default:
/* bypass */
@ -765,22 +760,22 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(EXYNOS_CISCCTRL);
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
switch (fmt) {
case DRM_FORMAT_RGB565:
cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
fimc_write(cfg, EXYNOS_CISCCTRL);
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return 0;
case DRM_FORMAT_RGB888:
cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
fimc_write(cfg, EXYNOS_CISCCTRL);
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
return 0;
case DRM_FORMAT_XRGB8888:
cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
fimc_write(cfg, EXYNOS_CISCCTRL);
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
break;
default:
/* bypass */
@ -788,7 +783,7 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
}
/* YUV */
cfg = fimc_read(EXYNOS_CIOCTRL);
cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
EXYNOS_CIOCTRL_ORDER422_MASK |
EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
@ -830,7 +825,7 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
return -EINVAL;
}
fimc_write(cfg, EXYNOS_CIOCTRL);
fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
return 0;
}
@ -843,16 +838,16 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
cfg = fimc_read(EXYNOS_CIEXTEN);
cfg = fimc_read(ctx, EXYNOS_CIEXTEN);
if (fmt == DRM_FORMAT_AYUV) {
cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
fimc_write(cfg, EXYNOS_CIEXTEN);
fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
} else {
cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
fimc_write(cfg, EXYNOS_CIEXTEN);
fimc_write(ctx, cfg, EXYNOS_CIEXTEN);
cfg = fimc_read(EXYNOS_CITRGFMT);
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
switch (fmt) {
@ -885,10 +880,10 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
return -EINVAL;
}
fimc_write(cfg, EXYNOS_CITRGFMT);
fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
}
cfg = fimc_read(EXYNOS_CIDMAPARAM);
cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
if (fmt == DRM_FORMAT_NV12MT)
@ -896,7 +891,7 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
else
cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
fimc_write(cfg, EXYNOS_CIDMAPARAM);
fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
return fimc_dst_set_fmt_order(ctx, fmt);
}
@ -911,7 +906,7 @@ static int fimc_dst_set_transf(struct device *dev,
DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
cfg = fimc_read(EXYNOS_CITRGFMT);
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
@ -951,53 +946,23 @@ static int fimc_dst_set_transf(struct device *dev,
return -EINVAL;
}
fimc_write(cfg, EXYNOS_CITRGFMT);
fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
*swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
return 0;
}
static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
{
DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
if (src >= dst * 64) {
DRM_ERROR("failed to make ratio and shift.\n");
return -EINVAL;
} else if (src >= dst * 32) {
*ratio = 32;
*shift = 5;
} else if (src >= dst * 16) {
*ratio = 16;
*shift = 4;
} else if (src >= dst * 8) {
*ratio = 8;
*shift = 3;
} else if (src >= dst * 4) {
*ratio = 4;
*shift = 2;
} else if (src >= dst * 2) {
*ratio = 2;
*shift = 1;
} else {
*ratio = 1;
*shift = 0;
}
return 0;
}
static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
{
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
u32 cfg, cfg_ext, shfactor;
u32 pre_dst_width, pre_dst_height;
u32 pre_hratio, hfactor, pre_vratio, vfactor;
u32 hfactor, vfactor;
int ret = 0;
u32 src_w, src_h, dst_w, dst_h;
cfg_ext = fimc_read(EXYNOS_CITRGFMT);
cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT);
if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
src_w = src->h;
src_h = src->w;
@ -1014,24 +979,24 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
dst_h = dst->h;
}
ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
if (ret) {
/* fimc_ippdrv_check_property ensures that the divisors are not zero */
hfactor = fls(src_w / dst_w / 2);
if (hfactor > FIMC_SHFACTOR / 2) {
dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
return ret;
return -EINVAL;
}
ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
if (ret) {
vfactor = fls(src_h / dst_h / 2);
if (vfactor > FIMC_SHFACTOR / 2) {
dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
return ret;
return -EINVAL;
}
pre_dst_width = src_w / pre_hratio;
pre_dst_height = src_h / pre_vratio;
pre_dst_width = src_w >> hfactor;
pre_dst_height = src_h >> vfactor;
DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n",
pre_dst_width, pre_dst_height);
DRM_DEBUG_KMS("pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
pre_hratio, hfactor, pre_vratio, vfactor);
DRM_DEBUG_KMS("hfactor[%d]vfactor[%d]\n", hfactor, vfactor);
sc->hratio = (src_w << 14) / (dst_w << hfactor);
sc->vratio = (src_h << 14) / (dst_h << vfactor);
@ -1044,13 +1009,13 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
DRM_DEBUG_KMS("shfactor[%d]\n", shfactor);
cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
fimc_write(cfg, EXYNOS_CISCPRERATIO);
EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) |
EXYNOS_CISCPRERATIO_PREVERRATIO(1 << vfactor));
fimc_write(ctx, cfg, EXYNOS_CISCPRERATIO);
cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
fimc_write(cfg, EXYNOS_CISCPREDST);
fimc_write(ctx, cfg, EXYNOS_CISCPREDST);
return ret;
}
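The ladder of comparisons in the removed fimc_get_ratio_shift() collapses into fls(src / dst / 2): the result is the pre-scale shift, 1 << shift is the pre-ratio written to CISCPRERATIO, and anything above FIMC_SHFACTOR / 2 is rejected. A standalone sketch of that arithmetic, with fls() re-implemented for userspace and sample sizes chosen purely for illustration:

#include <stdio.h>

#define FIMC_SHFACTOR 10

/* find-last-set, 1-based like the kernel's fls(); fls(0) == 0 */
static int fls_u32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int prescale_factor(unsigned int src, unsigned int dst)
{
	int factor = fls_u32(src / dst / 2);

	if (factor > FIMC_SHFACTOR / 2)
		return -1; /* downscale ratio too large for the prescaler */
	return factor;
}

int main(void)
{
	unsigned int src_w = 1920, dst_w = 480;
	int hfactor = prescale_factor(src_w, dst_w);

	if (hfactor < 0) {
		printf("ratio too large\n");
		return 1;
	}
	/* 1920:480 is 4:1 -> factor 2, pre-ratio 1 << 2 = 4, pre width 480 */
	printf("hfactor=%d pre_ratio=%d pre_dst_width=%u\n",
	       hfactor, 1 << hfactor, src_w >> hfactor);
	return 0;
}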
@ -1064,7 +1029,7 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n",
sc->hratio, sc->vratio);
cfg = fimc_read(EXYNOS_CISCCTRL);
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
@ -1084,14 +1049,14 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
fimc_write(cfg, EXYNOS_CISCCTRL);
fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
cfg_ext = fimc_read(EXYNOS_CIEXTEN);
cfg_ext = fimc_read(ctx, EXYNOS_CIEXTEN);
cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
fimc_write(cfg_ext, EXYNOS_CIEXTEN);
fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN);
}
static int fimc_dst_set_size(struct device *dev, int swap,
@ -1109,12 +1074,12 @@ static int fimc_dst_set_size(struct device *dev, int swap,
cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
fimc_write(cfg, EXYNOS_ORGOSIZE);
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
/* CSC ITU */
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
if (sz->hsize >= FIMC_WIDTH_ITU_709)
@ -1122,7 +1087,7 @@ static int fimc_dst_set_size(struct device *dev, int swap,
else
cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
fimc_write(cfg, EXYNOS_CIGCTRL);
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
if (swap) {
img_pos.w = pos->h;
@ -1132,41 +1097,38 @@ static int fimc_dst_set_size(struct device *dev, int swap,
}
/* target image size */
cfg = fimc_read(EXYNOS_CITRGFMT);
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
EXYNOS_CITRGFMT_TARGETV_MASK);
cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
fimc_write(cfg, EXYNOS_CITRGFMT);
fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
/* target area */
cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
fimc_write(cfg, EXYNOS_CITAREA);
fimc_write(ctx, cfg, EXYNOS_CITAREA);
/* offset Y(RGB), Cb, Cr */
cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
fimc_write(cfg, EXYNOS_CIOYOFF);
fimc_write(ctx, cfg, EXYNOS_CIOYOFF);
cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
fimc_write(cfg, EXYNOS_CIOCBOFF);
fimc_write(ctx, cfg, EXYNOS_CIOCBOFF);
cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
fimc_write(cfg, EXYNOS_CIOCROFF);
fimc_write(ctx, cfg, EXYNOS_CIOCROFF);
return 0;
}
static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
static int fimc_dst_get_buf_count(struct fimc_context *ctx)
{
u32 cfg, i, buf_num = 0;
u32 mask = 0x00000001;
u32 cfg, buf_num;
cfg = fimc_read(EXYNOS_CIFCNTSEQ);
cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
for (i = 0; i < FIMC_REG_SZ; i++)
if (cfg & (mask << i))
buf_num++;
buf_num = hweight32(cfg);
DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
@ -1181,13 +1143,14 @@ static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
u32 cfg;
u32 mask = 0x00000001 << buf_id;
int ret = 0;
unsigned long flags;
DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
mutex_lock(&ctx->lock);
spin_lock_irqsave(&ctx->lock, flags);
/* mask register set */
cfg = fimc_read(EXYNOS_CIFCNTSEQ);
cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
switch (buf_type) {
case IPP_BUF_ENQUEUE:
@ -1205,20 +1168,20 @@ static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
/* sequence id */
cfg &= ~mask;
cfg |= (enable << buf_id);
fimc_write(cfg, EXYNOS_CIFCNTSEQ);
fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
/* interrupt enable */
if (buf_type == IPP_BUF_ENQUEUE &&
fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
fimc_handle_irq(ctx, true, false, true);
fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
fimc_mask_irq(ctx, true);
/* interrupt disable */
if (buf_type == IPP_BUF_DEQUEUE &&
fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
fimc_handle_irq(ctx, false, false, true);
fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
fimc_mask_irq(ctx, false);
err_unlock:
mutex_unlock(&ctx->lock);
spin_unlock_irqrestore(&ctx->lock, flags);
return ret;
}
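CIFCNTSEQ is handled here as a plain bitmask: one bit per buffer slot, hweight32() counts how many are enqueued, and the FIMC_BUF_START/FIMC_BUF_STOP thresholds decide whether the interrupt stays masked. A small userspace sketch of that bookkeeping, with __builtin_popcount() standing in for hweight32() and the threshold values mirroring the defines above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUF_STOP  1
#define BUF_START 2

static unsigned int buf_count(uint32_t seq)
{
	return (unsigned int)__builtin_popcount(seq); /* hweight32() analogue */
}

static uint32_t buf_set(uint32_t seq, unsigned int buf_id, bool enable)
{
	uint32_t mask = 1u << buf_id;

	return enable ? (seq | mask) : (seq & ~mask);
}

int main(void)
{
	uint32_t seq = 0;

	seq = buf_set(seq, 0, true);
	seq = buf_set(seq, 3, true);
	printf("enabled buffers: %u -> irq %s\n", buf_count(seq),
	       buf_count(seq) >= BUF_START ? "on" : "off");

	seq = buf_set(seq, 3, false);
	printf("enabled buffers: %u -> irq %s\n", buf_count(seq),
	       buf_count(seq) <= BUF_STOP ? "off" : "on");
	return 0;
}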
@ -1252,25 +1215,25 @@ static int fimc_dst_set_addr(struct device *dev,
case IPP_BUF_ENQUEUE:
config = &property->config[EXYNOS_DRM_OPS_DST];
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
EXYNOS_CIOYSA(buf_id));
if (config->fmt == DRM_FORMAT_YVU420) {
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIOCBSA(buf_id));
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIOCRSA(buf_id));
} else {
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIOCBSA(buf_id));
fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIOCRSA(buf_id));
}
break;
case IPP_BUF_DEQUEUE:
fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id));
break;
default:
/* bypass */
@ -1342,11 +1305,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
if (!prop_list)
return -ENOMEM;
struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
prop_list->version = 1;
prop_list->writeback = 1;
@ -1371,8 +1330,6 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
prop_list->scale_min.hsize = FIMC_SCALE_MIN;
prop_list->scale_min.vsize = FIMC_SCALE_MIN;
ippdrv->prop_list = prop_list;
return 0;
}
@ -1395,7 +1352,7 @@ static int fimc_ippdrv_check_property(struct device *dev,
{
struct fimc_context *ctx = get_fimc_context(dev);
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
struct drm_exynos_ipp_config *config;
struct drm_exynos_pos *pos;
struct drm_exynos_sz *sz;
@ -1508,15 +1465,15 @@ static void fimc_clear_addr(struct fimc_context *ctx)
int i;
for (i = 0; i < FIMC_MAX_SRC; i++) {
fimc_write(0, EXYNOS_CIIYSA(i));
fimc_write(0, EXYNOS_CIICBSA(i));
fimc_write(0, EXYNOS_CIICRSA(i));
fimc_write(ctx, 0, EXYNOS_CIIYSA(i));
fimc_write(ctx, 0, EXYNOS_CIICBSA(i));
fimc_write(ctx, 0, EXYNOS_CIICRSA(i));
}
for (i = 0; i < FIMC_MAX_DST; i++) {
fimc_write(0, EXYNOS_CIOYSA(i));
fimc_write(0, EXYNOS_CIOCBSA(i));
fimc_write(0, EXYNOS_CIOCRSA(i));
fimc_write(ctx, 0, EXYNOS_CIOYSA(i));
fimc_write(ctx, 0, EXYNOS_CIOCBSA(i));
fimc_write(ctx, 0, EXYNOS_CIOCRSA(i));
}
}
@ -1556,7 +1513,7 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
property = &c_node->property;
fimc_handle_irq(ctx, true, false, true);
fimc_mask_irq(ctx, true);
for_each_ipp_ops(i) {
config = &property->config[i];
@ -1582,10 +1539,10 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
fimc_handle_lastend(ctx, false);
/* setup dma */
cfg0 = fimc_read(EXYNOS_MSCTRL);
cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
fimc_write(cfg0, EXYNOS_MSCTRL);
fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
break;
case IPP_CMD_WB:
fimc_set_type_ctrl(ctx, FIMC_WB_A);
@ -1610,41 +1567,33 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
}
/* Reset status */
fimc_write(0x0, EXYNOS_CISTATUS);
fimc_write(ctx, 0x0, EXYNOS_CISTATUS);
cfg0 = fimc_read(EXYNOS_CIIMGCPT);
cfg0 = fimc_read(ctx, EXYNOS_CIIMGCPT);
cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
/* Scaler */
cfg1 = fimc_read(EXYNOS_CISCCTRL);
cfg1 = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
EXYNOS_CISCCTRL_SCALERSTART);
fimc_write(cfg1, EXYNOS_CISCCTRL);
fimc_write(ctx, cfg1, EXYNOS_CISCCTRL);
/* Enable image capture*/
cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
fimc_write(cfg0, EXYNOS_CIIMGCPT);
fimc_write(ctx, cfg0, EXYNOS_CIIMGCPT);
/* Disable frame end irq */
cfg0 = fimc_read(EXYNOS_CIGCTRL);
cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
fimc_write(cfg0, EXYNOS_CIGCTRL);
fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
cfg0 = fimc_read(EXYNOS_CIOCTRL);
cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
fimc_write(cfg0, EXYNOS_CIOCTRL);
fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
if (cmd == IPP_CMD_M2M) {
cfg0 = fimc_read(EXYNOS_MSCTRL);
cfg0 |= EXYNOS_MSCTRL_ENVID;
fimc_write(cfg0, EXYNOS_MSCTRL);
fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
cfg0 = fimc_read(EXYNOS_MSCTRL);
cfg0 |= EXYNOS_MSCTRL_ENVID;
fimc_write(cfg0, EXYNOS_MSCTRL);
fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
}
return 0;
@ -1661,10 +1610,10 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
switch (cmd) {
case IPP_CMD_M2M:
/* Source clear */
cfg = fimc_read(EXYNOS_MSCTRL);
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
cfg &= ~EXYNOS_MSCTRL_ENVID;
fimc_write(cfg, EXYNOS_MSCTRL);
fimc_write(ctx, cfg, EXYNOS_MSCTRL);
break;
case IPP_CMD_WB:
exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
@ -1675,25 +1624,20 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
break;
}
fimc_handle_irq(ctx, false, false, true);
fimc_mask_irq(ctx, false);
/* reset sequence */
fimc_write(0x0, EXYNOS_CIFCNTSEQ);
fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
/* Scaler disable */
cfg = fimc_read(EXYNOS_CISCCTRL);
cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
fimc_write(cfg, EXYNOS_CISCCTRL);
fimc_clear_bits(ctx, EXYNOS_CISCCTRL, EXYNOS_CISCCTRL_SCALERSTART);
/* Disable image capture */
cfg = fimc_read(EXYNOS_CIIMGCPT);
cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
fimc_write(cfg, EXYNOS_CIIMGCPT);
fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
/* Enable frame end irq */
cfg = fimc_read(EXYNOS_CIGCTRL);
cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
fimc_write(cfg, EXYNOS_CIGCTRL);
fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
}
static void fimc_put_clocks(struct fimc_context *ctx)
@ -1848,7 +1792,7 @@ static int fimc_probe(struct platform_device *pdev)
DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
mutex_init(&ctx->lock);
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
pm_runtime_set_active(dev);
@ -1879,7 +1823,6 @@ static int fimc_remove(struct platform_device *pdev)
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
exynos_drm_ippdrv_unregister(ippdrv);
mutex_destroy(&ctx->lock);
fimc_put_clocks(ctx);
pm_runtime_set_suspended(dev);

View File

@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@ -38,6 +39,7 @@
*/
#define FIMD_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
/* position control register for hardware window 0, 2 ~ 4.*/
#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
@ -122,6 +124,7 @@ struct fimd_context {
struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
struct exynos_drm_display *display;
};
static const struct of_device_id fimd_driver_dt_match[] = {
@ -143,13 +146,57 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
return (struct fimd_driver_data *)of_id->data;
}
static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev, int pipe)
static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
ctx->drm_dev = drm_dev;
ctx->pipe = pipe;
if (ctx->suspended)
return;
atomic_set(&ctx->wait_vsync_event, 1);
/*
* wait for FIMD to signal VSYNC interrupt or return after
* timeout which is set to 50ms (refresh rate of 20).
*/
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
static void fimd_clear_channel(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
int win, ch_enabled = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + SHADOWCON);
if (val & SHADOWCON_CHx_ENABLE(win)) {
val &= ~SHADOWCON_CHx_ENABLE(win);
writel(val, ctx->regs + SHADOWCON);
ch_enabled = 1;
}
}
/* Wait for vsync, as disable channel takes effect at next vsync */
if (ch_enabled)
fimd_wait_for_vblank(mgr);
}
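fimd_clear_channel() scans every hardware window, clears its channel-enable bit in SHADOWCON, and only waits for a vblank if something was actually disabled, since the disable takes effect at the next vsync. A toy sketch of that scan over a fake SHADOWCON value (CH_ENABLE() here is a stand-in for the real SHADOWCON_CHx_ENABLE() macro):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WINDOWS_NR 5
#define CH_ENABLE(win) (1u << (win))

int main(void)
{
	uint32_t shadowcon = CH_ENABLE(0) | CH_ENABLE(3); /* two channels left on */
	bool ch_enabled = false;
	int win;

	for (win = 0; win < WINDOWS_NR; win++) {
		if (shadowcon & CH_ENABLE(win)) {
			shadowcon &= ~CH_ENABLE(win);
			ch_enabled = true;
		}
	}

	/* the real driver waits one vblank here so the disables latch */
	printf("shadowcon=0x%08x, need vblank wait: %s\n",
	       shadowcon, ch_enabled ? "yes" : "no");
	return 0;
}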
static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev)
{
struct fimd_context *ctx = mgr->ctx;
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
mgr->drm_dev = ctx->drm_dev = drm_dev;
mgr->pipe = ctx->pipe = priv->pipe++;
/*
* enable drm irq mode.
@ -169,8 +216,14 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
drm_dev->vblank_disable_allowed = true;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev))
if (is_drm_iommu_supported(ctx->drm_dev)) {
/*
* If any channel is already active, the iommu will throw a PAGE FAULT
* when it is enabled, so clear any enabled channel first.
*/
fimd_clear_channel(mgr);
drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
}
return 0;
}
@ -324,25 +377,6 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
}
}
static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
if (ctx->suspended)
return;
atomic_set(&ctx->wait_vsync_event, 1);
/*
* wait for FIMD to signal VSYNC interrupt or return after
* timeout which is set to 50ms (refresh rate of 20).
*/
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay)
{
@ -446,6 +480,19 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
* tearing for very small buffers, e.g. cursor buffer. Burst Mode
* switching based on overlay size is not recommended, as overlay size
* varies a lot towards the end of the screen and rapid movement causes
* unstable DMA, which results in an iommu crash/tear.
*/
if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
writel(val, ctx->regs + WINCON(win));
}
@ -656,19 +703,6 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
win_data->enabled = false;
}
static void fimd_clear_win(struct fimd_context *ctx, int win)
{
writel(0, ctx->regs + WINCON(win));
writel(0, ctx->regs + VIDOSD_A(win));
writel(0, ctx->regs + VIDOSD_B(win));
writel(0, ctx->regs + VIDOSD_C(win));
if (win == 1 || win == 2)
writel(0, ctx->regs + VIDOSD_D(win));
fimd_shadow_protect_win(ctx, win, false);
}
static void fimd_window_suspend(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
@ -803,8 +837,6 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
}
static struct exynos_drm_manager_ops fimd_manager_ops = {
.initialize = fimd_mgr_initialize,
.remove = fimd_mgr_remove,
.dpms = fimd_dpms,
.mode_fixup = fimd_mode_fixup,
.mode_set = fimd_mode_set,
@ -849,20 +881,64 @@ out:
return IRQ_HANDLED;
}
static int fimd_bind(struct device *dev, struct device *master, void *data)
{
struct fimd_context *ctx = fimd_manager.ctx;
struct drm_device *drm_dev = data;
fimd_mgr_initialize(&fimd_manager, drm_dev);
exynos_drm_crtc_create(&fimd_manager);
if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display);
return 0;
}
static void fimd_unbind(struct device *dev, struct device *master,
void *data)
{
struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
struct fimd_context *ctx = fimd_manager.ctx;
struct drm_crtc *crtc = mgr->crtc;
fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
if (ctx->display)
exynos_dpi_remove(dev);
fimd_mgr_remove(mgr);
crtc->funcs->destroy(crtc);
}
static const struct component_ops fimd_component_ops = {
.bind = fimd_bind,
.unbind = fimd_unbind,
};
static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
struct resource *res;
int win;
int ret = -EINVAL;
if (!dev->of_node)
return -ENODEV;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
fimd_manager.type);
if (ret)
return ret;
if (!dev->of_node) {
ret = -ENODEV;
goto err_del_component;
}
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
if (!ctx) {
ret = -ENOMEM;
goto err_del_component;
}
ctx->dev = dev;
ctx->suspended = true;
@ -875,32 +951,37 @@ static int fimd_probe(struct platform_device *pdev)
ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
return PTR_ERR(ctx->bus_clk);
ret = PTR_ERR(ctx->bus_clk);
goto err_del_component;
}
ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
return PTR_ERR(ctx->lcd_clk);
ret = PTR_ERR(ctx->lcd_clk);
goto err_del_component;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
if (IS_ERR(ctx->regs)) {
ret = PTR_ERR(ctx->regs);
goto err_del_component;
}
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
if (!res) {
dev_err(dev, "irq request failed.\n");
return -ENXIO;
ret = -ENXIO;
goto err_del_component;
}
ret = devm_request_irq(dev, res->start, fimd_irq_handler,
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
return ret;
goto err_del_component;
}
ctx->driver_data = drm_fimd_get_driver_data(pdev);
@ -910,30 +991,34 @@ static int fimd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &fimd_manager);
fimd_manager.ctx = ctx;
exynos_drm_manager_register(&fimd_manager);
exynos_dpi_probe(ctx->dev);
ctx->display = exynos_dpi_probe(dev);
if (IS_ERR(ctx->display))
return PTR_ERR(ctx->display);
pm_runtime_enable(dev);
pm_runtime_enable(&pdev->dev);
for (win = 0; win < WINDOWS_NR; win++)
fimd_clear_win(ctx, win);
ret = component_add(&pdev->dev, &fimd_component_ops);
if (ret)
goto err_disable_pm_runtime;
return 0;
return ret;
err_disable_pm_runtime:
pm_runtime_disable(&pdev->dev);
err_del_component:
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
return ret;
}
static int fimd_remove(struct platform_device *pdev)
{
struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
exynos_dpi_remove(&pdev->dev);
exynos_drm_manager_unregister(&fimd_manager);
fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &fimd_component_ops);
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
return 0;
}

View File

@ -612,22 +612,20 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
EXYNOS_BO_WC, args->size);
/*
* If physically contiguous memory allocation fails and if IOMMU is
* supported then try to get buffer from non physically contiguous
* memory area.
*/
if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
if (is_drm_iommu_supported(dev)) {
exynos_gem_obj = exynos_drm_gem_create(dev,
EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
args->size);
EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
args->size);
} else {
exynos_gem_obj = exynos_drm_gem_create(dev,
EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
args->size);
}
if (IS_ERR(exynos_gem_obj))
if (IS_ERR(exynos_gem_obj)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem_obj);
}
ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
&args->handle);
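With only width, height and bpp available, dumb_create derives the rest itself: bpp is rounded up to whole bytes for the pitch, and the buffer size is pitch times height, as shown at the top of this hunk. A quick standalone check of that arithmetic with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int width = 1366, height = 768, bpp = 32;

	unsigned int pitch = width * ((bpp + 7) / 8); /* round bpp up to bytes */
	unsigned long long size = (unsigned long long)pitch * height;

	printf("pitch=%u bytes, size=%llu bytes\n", pitch, size);
	return 0;
}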

View File

@ -1335,11 +1335,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
if (!prop_list)
return -ENOMEM;
struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
prop_list->version = 1;
prop_list->writeback = 1;
@ -1363,8 +1359,6 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
prop_list->scale_min.hsize = GSC_SCALE_MIN;
prop_list->scale_min.vsize = GSC_SCALE_MIN;
ippdrv->prop_list = prop_list;
return 0;
}
@ -1387,7 +1381,7 @@ static int gsc_ippdrv_check_property(struct device *dev,
{
struct gsc_context *ctx = get_gsc_context(dev);
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
struct drm_exynos_ipp_config *config;
struct drm_exynos_pos *pos;
struct drm_exynos_sz *sz;

View File

@ -167,6 +167,13 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
return 0;
}
static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
mutex_lock(lock);
idr_remove(id_idr, id);
mutex_unlock(lock);
}
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
void *obj;
@ -276,11 +283,6 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
if (list_empty(&exynos_drm_ippdrv_list)) {
DRM_DEBUG_KMS("ippdrv_list is empty.\n");
return ERR_PTR(-ENODEV);
}
/*
* This case searches for the ipp driver by prop_id handle;
* sometimes the ipp subsystem finds a driver by its prop_id.
@ -289,11 +291,14 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
if (!list_empty(&ippdrv->cmd_list)) {
list_for_each_entry(c_node, &ippdrv->cmd_list, list)
if (c_node->property.prop_id == prop_id)
return ippdrv;
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
if (c_node->property.prop_id == prop_id) {
mutex_unlock(&ippdrv->cmd_lock);
return ippdrv;
}
}
mutex_unlock(&ippdrv->cmd_lock);
}
return ERR_PTR(-ENODEV);
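The cmd_list walk is now bracketed by cmd_lock, and the early return inside the loop drops the lock before leaving. A minimal pthread sketch of the same shape, using hypothetical node/list names rather than the driver's types:

#include <pthread.h>
#include <stdio.h>

struct node {
	int prop_id;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Return the matching node, or NULL; the lock is released on every path. */
static struct node *find_by_prop_id(int prop_id)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next) {
		if (n->prop_id == prop_id) {
			pthread_mutex_unlock(&list_lock); /* early-return path */
			return n;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	struct node a = { .prop_id = 7, .next = NULL };

	head = &a;
	printf("found: %s\n", find_by_prop_id(7) ? "yes" : "no");
	return 0;
}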
@ -325,6 +330,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
if (!prop_list->ipp_id) {
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
count++;
/*
* Reports the ippdrv list count to the user application.
* As a first step, the user application fetches the ippdrv count.
@ -346,7 +352,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
return PTR_ERR(ippdrv);
}
prop_list = ippdrv->prop_list;
*prop_list = ippdrv->prop_list;
}
return 0;
@ -386,9 +392,11 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
* when we find this command no using prop_id.
* return property information set in this command node.
*/
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
if ((c_node->property.prop_id == prop_id) &&
(c_node->state == IPP_STATE_STOP)) {
mutex_unlock(&ippdrv->cmd_lock);
DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
property->cmd, (int)ippdrv);
@ -396,6 +404,7 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
return 0;
}
}
mutex_unlock(&ippdrv->cmd_lock);
DRM_ERROR("failed to search property.\n");
@ -499,7 +508,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
c_node->start_work = ipp_create_cmd_work();
if (IS_ERR(c_node->start_work)) {
DRM_ERROR("failed to create start work.\n");
goto err_clear;
goto err_remove_id;
}
c_node->stop_work = ipp_create_cmd_work();
@ -514,7 +523,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
goto err_free_stop;
}
mutex_init(&c_node->cmd_lock);
mutex_init(&c_node->lock);
mutex_init(&c_node->mem_lock);
mutex_init(&c_node->event_lock);
@ -526,7 +535,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
INIT_LIST_HEAD(&c_node->event_list);
list_splice_init(&priv->event_list, &c_node->event_list);
mutex_lock(&ippdrv->cmd_lock);
list_add_tail(&c_node->list, &ippdrv->cmd_list);
mutex_unlock(&ippdrv->cmd_lock);
/* make dedicated state without m2m */
if (!ipp_is_m2m_cmd(property->cmd))
@ -538,18 +549,24 @@ err_free_stop:
kfree(c_node->stop_work);
err_free_start:
kfree(c_node->start_work);
err_remove_id:
ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
kfree(c_node);
return ret;
}
static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
static void ipp_clean_cmd_node(struct ipp_context *ctx,
struct drm_exynos_ipp_cmd_node *c_node)
{
/* delete list */
list_del(&c_node->list);
ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
c_node->property.prop_id);
/* destroy mutex */
mutex_destroy(&c_node->cmd_lock);
mutex_destroy(&c_node->lock);
mutex_destroy(&c_node->mem_lock);
mutex_destroy(&c_node->event_lock);
@ -567,17 +584,10 @@ static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
struct list_head *head;
int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
mutex_lock(&c_node->mem_lock);
for_each_ipp_ops(i) {
/* source/destination memory list */
head = &c_node->mem_list[i];
if (list_empty(head)) {
DRM_DEBUG_KMS("%s memory empty.\n", i ? "dst" : "src");
continue;
}
/* find memory node entry */
list_for_each_entry(m_node, head, list) {
DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
@ -602,8 +612,6 @@ static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
ret = max(count[EXYNOS_DRM_OPS_SRC],
count[EXYNOS_DRM_OPS_DST]);
mutex_unlock(&c_node->mem_lock);
return ret;
}
@ -646,16 +654,13 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
return -EFAULT;
}
mutex_lock(&c_node->mem_lock);
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
/* get operations callback */
ops = ippdrv->ops[m_node->ops_id];
if (!ops) {
DRM_ERROR("not support ops.\n");
ret = -EFAULT;
goto err_unlock;
return -EFAULT;
}
/* set address and enable irq */
@ -664,12 +669,10 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
m_node->buf_id, IPP_BUF_ENQUEUE);
if (ret) {
DRM_ERROR("failed to set addr.\n");
goto err_unlock;
return ret;
}
}
err_unlock:
mutex_unlock(&c_node->mem_lock);
return ret;
}
@ -684,11 +687,9 @@ static struct drm_exynos_ipp_mem_node
void *addr;
int i;
mutex_lock(&c_node->mem_lock);
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
if (!m_node)
goto err_unlock;
return ERR_PTR(-ENOMEM);
/* clear base address for error handling */
memset(&buf_info, 0x0, sizeof(buf_info));
@ -722,15 +723,14 @@ static struct drm_exynos_ipp_mem_node
m_node->filp = file;
m_node->buf_info = buf_info;
mutex_lock(&c_node->mem_lock);
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
return m_node;
err_clear:
kfree(m_node);
err_unlock:
mutex_unlock(&c_node->mem_lock);
return ERR_PTR(-EFAULT);
}
@ -747,13 +747,6 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
return -EFAULT;
}
if (list_empty(&m_node->list)) {
DRM_ERROR("empty memory node.\n");
return -ENOMEM;
}
mutex_lock(&c_node->mem_lock);
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
/* put gem buffer */
@ -768,8 +761,6 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
list_del(&m_node->list);
kfree(m_node);
mutex_unlock(&c_node->mem_lock);
return 0;
}
@ -805,7 +796,9 @@ static int ipp_get_event(struct drm_device *drm_dev,
e->base.event = &e->event.base;
e->base.file_priv = file;
e->base.destroy = ipp_free_event;
mutex_lock(&c_node->event_lock);
list_add_tail(&e->base.link, &c_node->event_list);
mutex_unlock(&c_node->event_lock);
return 0;
}
@ -816,11 +809,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_send_event *e, *te;
int count = 0;
if (list_empty(&c_node->event_list)) {
DRM_DEBUG_KMS("event_list is empty.\n");
return;
}
mutex_lock(&c_node->event_lock);
list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
@ -841,9 +830,13 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
/* delete list */
list_del(&e->base.link);
kfree(e);
return;
goto out_unlock;
}
}
out_unlock:
mutex_unlock(&c_node->event_lock);
return;
}
static void ipp_handle_cmd_work(struct device *dev,
@ -887,7 +880,9 @@ static int ipp_queue_buf_with_run(struct device *dev,
return 0;
}
mutex_lock(&c_node->mem_lock);
if (!ipp_check_mem_list(c_node)) {
mutex_unlock(&c_node->mem_lock);
DRM_DEBUG_KMS("empty memory.\n");
return 0;
}
@ -904,10 +899,12 @@ static int ipp_queue_buf_with_run(struct device *dev,
} else {
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
mutex_unlock(&c_node->mem_lock);
DRM_ERROR("failed to set m node.\n");
return ret;
}
}
mutex_unlock(&c_node->mem_lock);
return 0;
}
@ -918,15 +915,15 @@ static void ipp_clean_queue_buf(struct drm_device *drm_dev,
{
struct drm_exynos_ipp_mem_node *m_node, *tm_node;
if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
/* delete list */
list_for_each_entry_safe(m_node, tm_node,
&c_node->mem_list[qbuf->ops_id], list) {
if (m_node->buf_id == qbuf->buf_id &&
m_node->ops_id == qbuf->ops_id)
ipp_put_mem_node(drm_dev, c_node, m_node);
}
/* delete list */
mutex_lock(&c_node->mem_lock);
list_for_each_entry_safe(m_node, tm_node,
&c_node->mem_list[qbuf->ops_id], list) {
if (m_node->buf_id == qbuf->buf_id &&
m_node->ops_id == qbuf->ops_id)
ipp_put_mem_node(drm_dev, c_node, m_node);
}
mutex_unlock(&c_node->mem_lock);
}
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
@ -998,7 +995,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
}
break;
case IPP_BUF_DEQUEUE:
mutex_lock(&c_node->cmd_lock);
mutex_lock(&c_node->lock);
/* put event for destination buffer */
if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
@ -1006,7 +1003,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
ipp_clean_queue_buf(drm_dev, c_node, qbuf);
mutex_unlock(&c_node->cmd_lock);
mutex_unlock(&c_node->lock);
break;
default:
DRM_ERROR("invalid buffer control.\n");
@ -1109,12 +1106,12 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
case IPP_CTRL_PLAY:
if (pm_runtime_suspended(ippdrv->dev))
pm_runtime_get_sync(ippdrv->dev);
c_node->state = IPP_STATE_START;
cmd_work = c_node->start_work;
cmd_work->ctrl = cmd_ctrl->ctrl;
ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
c_node->state = IPP_STATE_START;
break;
case IPP_CTRL_STOP:
cmd_work = c_node->stop_work;
@ -1129,10 +1126,12 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node->state = IPP_STATE_STOP;
ippdrv->dedicated = false;
ipp_clean_cmd_node(c_node);
mutex_lock(&ippdrv->cmd_lock);
ipp_clean_cmd_node(ctx, c_node);
if (list_empty(&ippdrv->cmd_list))
pm_runtime_put_sync(ippdrv->dev);
mutex_unlock(&ippdrv->cmd_lock);
break;
case IPP_CTRL_PAUSE:
cmd_work = c_node->stop_work;
@ -1260,9 +1259,11 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
/* store command info in ippdrv */
ippdrv->c_node = c_node;
mutex_lock(&c_node->mem_lock);
if (!ipp_check_mem_list(c_node)) {
DRM_DEBUG_KMS("empty memory.\n");
return -ENOMEM;
ret = -ENOMEM;
goto err_unlock;
}
/* set current property in ippdrv */
@ -1270,7 +1271,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
if (ret) {
DRM_ERROR("failed to set property.\n");
ippdrv->c_node = NULL;
return ret;
goto err_unlock;
}
/* check command */
@ -1285,7 +1286,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
if (!m_node) {
DRM_ERROR("failed to get node.\n");
ret = -EFAULT;
return ret;
goto err_unlock;
}
DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
@ -1293,7 +1294,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
DRM_ERROR("failed to set m node.\n");
return ret;
goto err_unlock;
}
}
break;
@ -1305,7 +1306,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
DRM_ERROR("failed to set m node.\n");
return ret;
goto err_unlock;
}
}
break;
@ -1317,14 +1318,16 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
DRM_ERROR("failed to set m node.\n");
return ret;
goto err_unlock;
}
}
break;
default:
DRM_ERROR("invalid operations.\n");
return -EINVAL;
ret = -EINVAL;
goto err_unlock;
}
mutex_unlock(&c_node->mem_lock);
DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
@ -1333,11 +1336,17 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
ret = ippdrv->start(ippdrv->dev, property->cmd);
if (ret) {
DRM_ERROR("failed to start ops.\n");
ippdrv->c_node = NULL;
return ret;
}
}
return 0;
err_unlock:
mutex_unlock(&c_node->mem_lock);
ippdrv->c_node = NULL;
return ret;
}
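
ipp_start_property() now funnels its failure paths through a single unlock label instead of returning with mem_lock held. A minimal, generic sketch of that idiom, with hypothetical names that are not part of this patch:

	/* Illustrative only: struct foo and foo_process() are made up. */
	static int do_work_locked(struct foo *f)
	{
		int ret;

		mutex_lock(&f->lock);

		if (!f->ready) {
			ret = -EINVAL;
			goto err_unlock;	/* every failure path exits here */
		}

		ret = foo_process(f);		/* may also fail */
		if (ret)
			goto err_unlock;

		mutex_unlock(&f->lock);
		return 0;

	err_unlock:
		mutex_unlock(&f->lock);
		return ret;
	}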
static int ipp_stop_property(struct drm_device *drm_dev,
@ -1354,6 +1363,8 @@ static int ipp_stop_property(struct drm_device *drm_dev,
/* put event */
ipp_put_event(c_node, NULL);
mutex_lock(&c_node->mem_lock);
/* check command */
switch (property->cmd) {
case IPP_CMD_M2M:
@ -1361,11 +1372,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
/* source/destination memory list */
head = &c_node->mem_list[i];
if (list_empty(head)) {
DRM_DEBUG_KMS("mem_list is empty.\n");
break;
}
list_for_each_entry_safe(m_node, tm_node,
head, list) {
ret = ipp_put_mem_node(drm_dev, c_node,
@ -1381,11 +1387,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
/* destination memory list */
head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
if (list_empty(head)) {
DRM_DEBUG_KMS("mem_list is empty.\n");
break;
}
list_for_each_entry_safe(m_node, tm_node, head, list) {
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret) {
@ -1398,11 +1399,6 @@ static int ipp_stop_property(struct drm_device *drm_dev,
/* source memory list */
head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
if (list_empty(head)) {
DRM_DEBUG_KMS("mem_list is empty.\n");
break;
}
list_for_each_entry_safe(m_node, tm_node, head, list) {
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret) {
@ -1418,6 +1414,8 @@ static int ipp_stop_property(struct drm_device *drm_dev,
}
err_clear:
mutex_unlock(&c_node->mem_lock);
/* stop operations */
if (ippdrv->stop)
ippdrv->stop(ippdrv->dev, property->cmd);
@ -1446,7 +1444,7 @@ void ipp_sched_cmd(struct work_struct *work)
return;
}
mutex_lock(&c_node->cmd_lock);
mutex_lock(&c_node->lock);
property = &c_node->property;
@ -1494,7 +1492,7 @@ void ipp_sched_cmd(struct work_struct *work)
DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
err_unlock:
mutex_unlock(&c_node->cmd_lock);
mutex_unlock(&c_node->lock);
}
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
@ -1524,14 +1522,18 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
return -EINVAL;
}
mutex_lock(&c_node->event_lock);
if (list_empty(&c_node->event_list)) {
DRM_DEBUG_KMS("event list is empty.\n");
return 0;
ret = 0;
goto err_event_unlock;
}
mutex_lock(&c_node->mem_lock);
if (!ipp_check_mem_list(c_node)) {
DRM_DEBUG_KMS("empty memory.\n");
return 0;
ret = 0;
goto err_mem_unlock;
}
/* check command */
@ -1545,7 +1547,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_mem_node, list);
if (!m_node) {
DRM_ERROR("empty memory node.\n");
return -ENOMEM;
ret = -ENOMEM;
goto err_mem_unlock;
}
tbuf_id[i] = m_node->buf_id;
@ -1567,7 +1570,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
m_node = ipp_find_mem_node(c_node, &qbuf);
if (!m_node) {
DRM_ERROR("empty memory node.\n");
return -ENOMEM;
ret = -ENOMEM;
goto err_mem_unlock;
}
tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
@ -1584,7 +1588,8 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_mem_node, list);
if (!m_node) {
DRM_ERROR("empty memory node.\n");
return -ENOMEM;
ret = -ENOMEM;
goto err_mem_unlock;
}
tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
@ -1595,8 +1600,10 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
break;
default:
DRM_ERROR("invalid operations.\n");
return -EINVAL;
ret = -EINVAL;
goto err_mem_unlock;
}
mutex_unlock(&c_node->mem_lock);
if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
@ -1611,11 +1618,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
e = list_first_entry(&c_node->event_list,
struct drm_exynos_ipp_send_event, base.link);
if (!e) {
DRM_ERROR("empty event.\n");
return -EINVAL;
}
do_gettimeofday(&now);
DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
e->event.tv_sec = now.tv_sec;
@ -1630,11 +1632,18 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
mutex_unlock(&c_node->event_lock);
DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
return 0;
err_mem_unlock:
mutex_unlock(&c_node->mem_lock);
err_event_unlock:
mutex_unlock(&c_node->event_lock);
return ret;
}
void ipp_sched_event(struct work_struct *work)
@ -1676,8 +1685,6 @@ void ipp_sched_event(struct work_struct *work)
goto err_completion;
}
mutex_lock(&c_node->event_lock);
ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
if (ret) {
DRM_ERROR("failed to send event.\n");
@ -1687,8 +1694,6 @@ void ipp_sched_event(struct work_struct *work)
err_completion:
if (ipp_is_m2m_cmd(c_node->property.cmd))
complete(&c_node->start_complete);
mutex_unlock(&c_node->event_lock);
}
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
@ -1699,23 +1704,21 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
u32 ipp_id;
ippdrv->drm_dev = drm_dev;
ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
&ippdrv->ipp_id);
if (ret) {
&ipp_id);
if (ret || ipp_id == 0) {
DRM_ERROR("failed to create id.\n");
goto err_idr;
goto err;
}
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
count++, (int)ippdrv, ippdrv->ipp_id);
count++, (int)ippdrv, ipp_id);
if (ippdrv->ipp_id == 0) {
DRM_ERROR("failed to get ipp_id[%d]\n",
ippdrv->ipp_id);
goto err_idr;
}
ippdrv->prop_list.ipp_id = ipp_id;
/* store parent device for node */
ippdrv->parent_dev = dev;
@ -1724,39 +1727,46 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
ippdrv->event_workq = ctx->event_workq;
ippdrv->sched_event = ipp_sched_event;
INIT_LIST_HEAD(&ippdrv->cmd_list);
mutex_init(&ippdrv->cmd_lock);
if (is_drm_iommu_supported(drm_dev)) {
ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
if (ret) {
DRM_ERROR("failed to activate iommu\n");
goto err_iommu;
goto err;
}
}
}
return 0;
err_iommu:
err:
/* get ipp driver entry */
list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
drv_list) {
if (is_drm_iommu_supported(drm_dev))
drm_iommu_detach_device(drm_dev, ippdrv->dev);
err_idr:
idr_destroy(&ctx->ipp_idr);
idr_destroy(&ctx->prop_idr);
ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
ippdrv->prop_list.ipp_id);
}
return ret;
}
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
struct exynos_drm_ippdrv *ippdrv;
struct ipp_context *ctx = get_ipp_context(dev);
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
if (is_drm_iommu_supported(drm_dev))
drm_iommu_detach_device(drm_dev, ippdrv->dev);
ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
ippdrv->prop_list.ipp_id);
ippdrv->drm_dev = NULL;
exynos_drm_ippdrv_unregister(ippdrv);
}
@ -1787,20 +1797,14 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
int count = 0;
DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
if (list_empty(&exynos_drm_ippdrv_list)) {
DRM_DEBUG_KMS("ippdrv_list is empty.\n");
goto err_clear;
}
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
if (list_empty(&ippdrv->cmd_list))
continue;
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry_safe(c_node, tc_node,
&ippdrv->cmd_list, list) {
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
@ -1820,14 +1824,14 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
}
ippdrv->dedicated = false;
ipp_clean_cmd_node(c_node);
ipp_clean_cmd_node(ctx, c_node);
if (list_empty(&ippdrv->cmd_list))
pm_runtime_put_sync(ippdrv->dev);
}
}
mutex_unlock(&ippdrv->cmd_lock);
}
err_clear:
kfree(priv);
return;
}

View File

@ -52,7 +52,7 @@ struct drm_exynos_ipp_cmd_work {
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
* @cmd_lock: lock for synchronization of access to ioctl.
* @lock: lock for synchronization of access to ioctl.
* @mem_lock: lock for synchronization of access to memory nodes.
* @event_lock: lock for synchronization of access to scheduled event.
* @start_complete: completion of start of command.
@ -68,7 +68,7 @@ struct drm_exynos_ipp_cmd_node {
struct list_head list;
struct list_head event_list;
struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
struct mutex cmd_lock;
struct mutex lock;
struct mutex mem_lock;
struct mutex event_lock;
struct completion start_complete;
@ -83,7 +83,7 @@ struct drm_exynos_ipp_cmd_node {
/*
* A structure of buffer information.
*
* @gem_objs: Y, Cb, Cr each gem object.
* @handles: Y, Cb, Cr each gem object handle.
* @base: Y, Cb, Cr each planar address.
*/
struct drm_exynos_ipp_buf_info {
@ -142,12 +142,12 @@ struct exynos_drm_ipp_ops {
* @parent_dev: parent device information.
* @dev: platform device.
* @drm_dev: drm device.
* @ipp_id: id of ipp driver.
* @dedicated: dedicated ipp device.
* @ops: source, destination operations.
* @event_workq: event work queue.
* @c_node: current command information.
* @cmd_list: list head for command information.
* @cmd_lock: lock for synchronization of access to cmd_list.
* @prop_list: property informations of current ipp driver.
* @check_property: check property about format, size, buffer.
* @reset: reset ipp block.
@ -160,13 +160,13 @@ struct exynos_drm_ippdrv {
struct device *parent_dev;
struct device *dev;
struct drm_device *drm_dev;
u32 ipp_id;
bool dedicated;
struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
struct workqueue_struct *event_workq;
struct drm_exynos_ipp_cmd_node *c_node;
struct list_head cmd_list;
struct drm_exynos_ipp_prop_list *prop_list;
struct mutex cmd_lock;
struct drm_exynos_ipp_prop_list prop_list;
int (*check_property)(struct device *dev,
struct drm_exynos_ipp_property *property);

View File

@ -158,8 +158,9 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
queue_work(ippdrv->event_workq,
(struct work_struct *)event_work);
} else
} else {
DRM_ERROR("the SFR is set illegally\n");
}
return IRQ_HANDLED;
}
@ -469,11 +470,7 @@ static struct exynos_drm_ipp_ops rot_dst_ops = {
static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
if (!prop_list)
return -ENOMEM;
struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
prop_list->version = 1;
prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@ -486,8 +483,6 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
prop_list->crop = 0;
prop_list->scale = 0;
ippdrv->prop_list = prop_list;
return 0;
}

View File

@ -51,6 +51,7 @@ struct vidi_context {
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector connector;
struct exynos_drm_subdrv subdrv;
struct vidi_win_data win_data[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
@ -294,14 +295,13 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
}
static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev, int pipe)
struct drm_device *drm_dev)
{
struct vidi_context *ctx = mgr->ctx;
struct exynos_drm_private *priv = drm_dev->dev_private;
DRM_ERROR("vidi initialize ct=%p dev=%p pipe=%d\n", ctx, drm_dev, pipe);
ctx->drm_dev = drm_dev;
ctx->pipe = pipe;
mgr->drm_dev = ctx->drm_dev = drm_dev;
mgr->pipe = ctx->pipe = priv->pipe++;
/*
* enable drm irq mode.
@ -324,7 +324,6 @@ static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
}
static struct exynos_drm_manager_ops vidi_manager_ops = {
.initialize = vidi_mgr_initialize,
.dpms = vidi_dpms,
.commit = vidi_commit,
.enable_vblank = vidi_enable_vblank,
@ -533,12 +532,6 @@ static int vidi_get_modes(struct drm_connector *connector)
return drm_add_edid_modes(connector, edid);
}
static int vidi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
{
struct vidi_context *ctx = ctx_from_connector(connector);
@ -548,7 +541,6 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
.get_modes = vidi_get_modes,
.mode_valid = vidi_mode_valid,
.best_encoder = vidi_best_encoder,
};
@ -586,13 +578,38 @@ static struct exynos_drm_display vidi_display = {
.ops = &vidi_display_ops,
};
static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
struct vidi_context *ctx = mgr->ctx;
struct drm_crtc *crtc = ctx->crtc;
int ret;
vidi_mgr_initialize(mgr, drm_dev);
ret = exynos_drm_crtc_create(&vidi_manager);
if (ret) {
DRM_ERROR("failed to create crtc.\n");
return ret;
}
ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display);
if (ret) {
crtc->funcs->destroy(crtc);
DRM_ERROR("failed to create encoder and connector.\n");
return ret;
}
return 0;
}
static int vidi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_subdrv *subdrv;
struct vidi_context *ctx;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@ -607,28 +624,43 @@ static int vidi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &vidi_manager);
ret = device_create_file(dev, &dev_attr_connection);
if (ret < 0)
DRM_INFO("failed to create connection sysfs.\n");
subdrv = &ctx->subdrv;
subdrv->dev = &pdev->dev;
subdrv->probe = vidi_subdrv_probe;
exynos_drm_manager_register(&vidi_manager);
exynos_drm_display_register(&vidi_display);
ret = exynos_drm_subdrv_register(subdrv);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register drm vidi device\n");
return ret;
}
ret = device_create_file(&pdev->dev, &dev_attr_connection);
if (ret < 0) {
exynos_drm_subdrv_unregister(subdrv);
DRM_INFO("failed to create connection sysfs.\n");
}
return 0;
}
static int vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
exynos_drm_display_unregister(&vidi_display);
exynos_drm_manager_unregister(&vidi_manager);
struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
struct vidi_context *ctx = mgr->ctx;
struct drm_encoder *encoder = ctx->encoder;
struct drm_crtc *crtc = mgr->crtc;
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
ctx->raw_edid = NULL;
return -EINVAL;
}
crtc->funcs->destroy(crtc);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&ctx->connector);
return 0;
}
@ -640,3 +672,31 @@ struct platform_driver vidi_driver = {
.owner = THIS_MODULE,
},
};
int exynos_drm_probe_vidi(void)
{
struct platform_device *pdev;
int ret;
pdev = platform_device_register_simple("exynos-drm-vidi", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
ret = platform_driver_register(&vidi_driver);
if (ret) {
platform_device_unregister(pdev);
return ret;
}
return ret;
}
void exynos_drm_remove_vidi(void)
{
struct vidi_context *ctx = vidi_manager.ctx;
struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
struct platform_device *pdev = to_platform_device(subdrv->dev);
platform_driver_unregister(&vidi_driver);
platform_device_unregister(pdev);
}

File diff suppressed because it is too large

View File

@ -1,23 +0,0 @@
/*
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authors:
* Inki Dae <inki.dae@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _EXYNOS_HDMI_H_
#define _EXYNOS_HDMI_H_
void hdmi_attach_ddc_client(struct i2c_client *ddc);
void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
extern struct i2c_driver hdmiphy_driver;
extern struct i2c_driver ddc_driver;
#endif

View File

@ -1,65 +0,0 @@
/*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Authors:
* Seung-Woo Kim <sw0312.kim@samsung.com>
* Inki Dae <inki.dae@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <drm/drmP.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
static int hdmiphy_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
hdmi_attach_hdmiphy_client(client);
dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
"into i2c adapter successfully\n");
return 0;
}
static int hdmiphy_remove(struct i2c_client *client)
{
dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
"from i2c adapter successfully\n");
return 0;
}
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
}, {
.compatible = "samsung,exynos4210-hdmiphy",
}, {
.compatible = "samsung,exynos4212-hdmiphy",
}, {
/* end node */
}
};
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
.of_match_table = hdmiphy_match_types,
},
.probe = hdmiphy_probe,
.remove = hdmiphy_remove,
.command = NULL,
};
EXPORT_SYMBOL(hdmiphy_driver);

View File

@ -31,6 +31,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/component.h>
#include <drm/exynos_drm.h>
@ -830,13 +831,15 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
}
static int mixer_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev, int pipe)
struct drm_device *drm_dev)
{
int ret;
struct mixer_context *mixer_ctx = mgr->ctx;
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
mixer_ctx->drm_dev = drm_dev;
mixer_ctx->pipe = pipe;
mgr->drm_dev = mixer_ctx->drm_dev = drm_dev;
mgr->pipe = mixer_ctx->pipe = priv->pipe++;
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(mixer_ctx);
@ -1142,8 +1145,6 @@ int mixer_check_mode(struct drm_display_mode *mode)
}
static struct exynos_drm_manager_ops mixer_manager_ops = {
.initialize = mixer_initialize,
.remove = mixer_mgr_remove,
.dpms = mixer_dpms,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
@ -1200,11 +1201,13 @@ static struct of_device_id mixer_match_types[] = {
}
};
static int mixer_probe(struct platform_device *pdev)
static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
struct device *dev = &pdev->dev;
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
struct mixer_context *ctx;
struct mixer_drv_data *drv;
int ret;
dev_info(dev, "probe start\n");
@ -1233,19 +1236,61 @@ static int mixer_probe(struct platform_device *pdev)
atomic_set(&ctx->wait_vsync_event, 0);
mixer_manager.ctx = ctx;
ret = mixer_initialize(&mixer_manager, drm_dev);
if (ret)
return ret;
platform_set_drvdata(pdev, &mixer_manager);
exynos_drm_manager_register(&mixer_manager);
ret = exynos_drm_crtc_create(&mixer_manager);
if (ret) {
mixer_mgr_remove(&mixer_manager);
return ret;
}
pm_runtime_enable(dev);
return 0;
}
static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
struct drm_crtc *crtc = mgr->crtc;
dev_info(dev, "remove successful\n");
mixer_mgr_remove(mgr);
pm_runtime_disable(dev);
crtc->funcs->destroy(crtc);
}
static const struct component_ops mixer_component_ops = {
.bind = mixer_bind,
.unbind = mixer_unbind,
};
static int mixer_probe(struct platform_device *pdev)
{
int ret;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
mixer_manager.type);
if (ret)
return ret;
ret = component_add(&pdev->dev, &mixer_component_ops);
if (ret)
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
return ret;
}
static int mixer_remove(struct platform_device *pdev)
{
dev_info(&pdev->dev, "remove successful\n");
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &mixer_component_ops);
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
return 0;
}
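
The mixer now follows the component-framework split: probe()/remove() only register and unregister the component, while the real setup and teardown live in the bind()/unbind() callbacks, which the framework invokes once the rest of the composite exynos DRM device has come together. In outline, using the names from the code above:

	/*
	 * probe   -> exynos_drm_component_add() + component_add(dev, &mixer_component_ops)
	 * bind    -> mixer_initialize() + exynos_drm_crtc_create() + pm_runtime_enable()
	 * unbind  -> mixer_mgr_remove() + pm_runtime_disable() + crtc->funcs->destroy(crtc)
	 * remove  -> component_del() + exynos_drm_component_del()
	 */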

View File

@ -578,4 +578,20 @@
#define HDMI_TG_VACT_ST4_H HDMI_TG_BASE(0x0074)
#define HDMI_TG_3D HDMI_TG_BASE(0x00F0)
/* HDMI PHY Registers Offsets*/
#define HDMIPHY_POWER (0x74 >> 2)
#define HDMIPHY_MODE_SET_DONE (0x7c >> 2)
/* HDMI PHY Values */
#define HDMI_PHY_POWER_ON 0x80
#define HDMI_PHY_POWER_OFF 0xff
/* HDMI PHY Values */
#define HDMI_PHY_DISABLE_MODE_SET 0x80
#define HDMI_PHY_ENABLE_MODE_SET 0x00
/* PMU Registers for PHY */
#define PMU_HDMI_PHY_CONTROL 0x700
#define PMU_HDMI_PHY_ENABLE_BIT BIT(0)
#endif /* SAMSUNG_REGS_HDMI_H */

View File

@ -148,7 +148,7 @@ static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
break;
case BIT(14):
/*wait for all fifo empty*/
/*wait_for_all_fifos_empty(sender)*/;
/*wait_for_all_fifos_empty(sender)*/
break;
case BIT(15):
dev_dbg(sender->dev->dev, "No Action required\n");

View File

@ -112,11 +112,9 @@ static void psb_driver_lastclose(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_fbdev *fbdev = dev_priv->fbdev;
drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
ret = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->psb_fb_helper);
if (ret)
DRM_DEBUG("failed to restore crtc mode\n");
drm_modeset_unlock_all(dev);
return;
}
@ -354,7 +352,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
drm_irq_install(dev);
drm_irq_install(dev, dev->pdev->irq);
dev->vblank_disable_allowed = true;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@ -479,7 +477,7 @@ static struct drm_driver driver = {
.lastclose = psb_driver_lastclose,
.preclose = psb_driver_preclose,
.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
.num_ioctls = ARRAY_SIZE(psb_ioctls),
.device_is_agp = psb_driver_device_is_agp,
.irq_preinstall = psb_irq_preinstall,
.irq_postinstall = psb_irq_postinstall,

View File

@ -568,11 +568,11 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
{
uint8_t sum = 0;
int sum = 0;
while (bytes--)
sum += *buf++;
return (255 - sum) + 1;
sum -= *buf++;
return sum;
}
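
Both the old and the new expression return the additive inverse of the byte sum modulo 256, so the buffer bytes plus the checksum sum to zero. A quick worked check with a made-up byte sum of 0x91:

	/*
	 * old:  (255 - 0x91) + 1        == 0x6f  (truncated to u8)
	 * new:  (uint8_t)-(0x91)        == 0x6f
	 * and   (0x91 + 0x6f) & 0xff    == 0x00
	 */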
#define HB(x) (x)

View File

@ -1251,7 +1251,7 @@ const struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
/**
* Determine if the device really is AGP or not.

View File

@ -5,6 +5,7 @@ config DRM_I915
depends on (AGP || AGP=n)
select INTEL_GTT
select AGP_INTEL if AGP
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
@ -71,7 +72,7 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
config DRM_I915_UMS
bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
depends on DRM_I915
depends on DRM_I915 && BROKEN
default n
help
Choose this option if you still need userspace modesetting.

View File

@ -18,6 +18,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
i915_gem_context.o \
i915_gem_render_state.o \
i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
@ -26,12 +27,18 @@ i915-y += i915_cmd_parser.o \
i915_gem.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
intel_ringbuffer.o \
intel_uncore.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen7.o \
intel_renderstate_gen8.o
# modesetting core code
i915-y += intel_bios.o \
intel_display.o \
@ -55,6 +62,7 @@ i915-y += dvo_ch7017.o \
intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
intel_dsi_panel_vbt.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \

View File

@ -160,7 +160,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
}
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",

View File

@ -195,7 +195,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
if (i2c_transfer(adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
};
}
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "

View File

@ -121,7 +121,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
}
if (!ns->quiet) {
DRM_DEBUG_KMS
@ -233,9 +233,8 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
__FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
mode->vtotal);
("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
/*
* Currently, these are all the modes I have data from.
@ -261,9 +260,8 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
DRM_DEBUG_KMS
("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
__FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
mode->vtotal);
("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
/*
* Where do I find the native resolution for which scaling is not required???
@ -277,8 +275,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
if (mode->hdisplay == 800 && mode->vdisplay == 600) {
/* mode 277 */
ns->reg_8_shadow &= ~NS2501_8_BPAS;
DRM_DEBUG_KMS("%s: switching to 800x600\n",
__FUNCTION__);
DRM_DEBUG_KMS("switching to 800x600\n");
/*
* No, I do not know where this data comes from.
@ -341,8 +338,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
/* mode 274 */
DRM_DEBUG_KMS("%s: switching to 640x480\n",
__FUNCTION__);
DRM_DEBUG_KMS("switching to 640x480\n");
/*
* No, I do not know where this data comes from.
* It is just what the video bios left in the DVO, so
@ -406,8 +402,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
/* mode 280 */
DRM_DEBUG_KMS("%s: switching to 1024x768\n",
__FUNCTION__);
DRM_DEBUG_KMS("switching to 1024x768\n");
/*
* This might or might not work, actually. I'm silently
* assuming here that the native panel resolution is
@ -458,8 +453,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
__FUNCTION__, enable);
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
ch = ns->reg_8_shadow;

View File

@ -93,7 +93,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
}
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",

View File

@ -118,7 +118,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
}
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",

View File

@ -28,7 +28,7 @@
#include "i915_drv.h"
/**
* DOC: i915 batch buffer command parser
* DOC: batch buffer command parser
*
* Motivation:
* Certain OpenGL features (e.g. transform feedback, performance monitoring)
@ -86,6 +86,367 @@
* general bitmasking mechanism.
*/
#define STD_MI_OPCODE_MASK 0xFF800000
#define STD_3D_OPCODE_MASK 0xFFFF0000
#define STD_2D_OPCODE_MASK 0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000
#define CMD(op, opm, f, lm, fl, ...) \
{ \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
.cmd = { (op), (opm) }, \
.length = { (lm) }, \
__VA_ARGS__ \
}
/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER
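
To make the tables below easier to read, here is how one entry expands under the CMD() macro and the single-letter aliases (an expansion shown for illustration, not an extra table entry):

	/*
	 * CMD( MI_NOOP, SMI, F, 1, S )
	 *   => { .flags  = CMD_DESC_SKIP | CMD_DESC_FIXED,
	 *        .cmd    = { MI_NOOP, STD_MI_OPCODE_MASK },
	 *        .length = { 1 } }
	 */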
/* Command Mask Fixed Len Action
---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
static const struct drm_i915_cmd_descriptor render_cmds[] = {
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_UPDATE_GTT, SMI, !F, 0xFF, R ),
CMD( MI_CLFLUSH, SMI, !F, 0x3FF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_REPORT_PERF_COUNT, SMI, !F, 0x3F, B,
.bits = {{
.offset = 1,
.mask = MI_REPORT_PERF_COUNT_GGTT,
.expected = 0,
}}, ),
CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( GFX_OP_3DSTATE_VF_STATISTICS, S3D, F, 1, S ),
CMD( PIPELINE_SELECT, S3D, F, 1, S ),
CMD( MEDIA_VFE_STATE, S3D, !F, 0xFFFF, B,
.bits = {{
.offset = 2,
.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
.expected = 0,
}}, ),
CMD( GPGPU_OBJECT, S3D, !F, 0xFF, S ),
CMD( GPGPU_WALKER, S3D, !F, 0xFF, S ),
CMD( GFX_OP_3DSTATE_SO_DECL_LIST, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_PIPE_CONTROL(5), S3D, !F, 0xFF, B,
.bits = {{
.offset = 1,
.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
.expected = 0,
},
{
.offset = 1,
.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_STORE_DATA_INDEX),
.expected = 0,
.condition_offset = 1,
.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
}}, ),
};
static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
CMD( MI_RS_CONTROL, SMI, F, 1, S ),
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_VS, S3D, !F, 0x7FF, S ),
CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_PS, S3D, !F, 0x7FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
};
static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
.bits = {{
.offset = 0,
.mask = MI_FLUSH_DW_NOTIFY,
.expected = 0,
},
{
.offset = 1,
.mask = MI_FLUSH_DW_USE_GTT,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
},
{
.offset = 0,
.mask = MI_FLUSH_DW_STORE_INDEX,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
}}, ),
CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
/*
* MFX_WAIT doesn't fit the way we handle length for most commands.
* It has a length field but it uses a non-standard length bias.
* It is always 1 dword though, so just treat it as fixed length.
*/
CMD( MFX_WAIT, SMFX, F, 1, S ),
};
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
.bits = {{
.offset = 0,
.mask = MI_FLUSH_DW_NOTIFY,
.expected = 0,
},
{
.offset = 1,
.mask = MI_FLUSH_DW_USE_GTT,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
},
{
.offset = 0,
.mask = MI_FLUSH_DW_STORE_INDEX,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
}}, ),
CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
};
static const struct drm_i915_cmd_descriptor blt_cmds[] = {
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
.bits = {{
.offset = 0,
.mask = MI_FLUSH_DW_NOTIFY,
.expected = 0,
},
{
.offset = 1,
.mask = MI_FLUSH_DW_USE_GTT,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
},
{
.offset = 0,
.mask = MI_FLUSH_DW_STORE_INDEX,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
}}, ),
CMD( COLOR_BLT, S2D, !F, 0x3F, S ),
CMD( SRC_COPY_BLT, S2D, !F, 0x3F, S ),
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B
#undef M
static const struct drm_i915_cmd_table gen7_render_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ render_cmds, ARRAY_SIZE(render_cmds) },
};
static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ render_cmds, ARRAY_SIZE(render_cmds) },
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};
static const struct drm_i915_cmd_table gen7_video_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ video_cmds, ARRAY_SIZE(video_cmds) },
};
static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};
static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ blt_cmds, ARRAY_SIZE(blt_cmds) },
};
static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ blt_cmds, ARRAY_SIZE(blt_cmds) },
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
/*
* Register whitelists, sorted by increasing register offset.
*
* Some registers that userspace accesses are 64 bits. The register
* access commands only allow 32-bit accesses. Hence, we have to include
* entries for both halves of the 64-bit registers.
*/
/* Convenience macro for adding 64-bit registers */
#define REG64(addr) (addr), (addr + sizeof(u32))
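
For example, the first entry of the render whitelist below expands to the two 32-bit halves of the 64-bit counter register:

	/* REG64(HS_INVOCATION_COUNT)
	 *   => HS_INVOCATION_COUNT, (HS_INVOCATION_COUNT + sizeof(u32)) */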
static const u32 gen7_render_regs[] = {
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
REG64(IA_VERTICES_COUNT),
REG64(IA_PRIMITIVES_COUNT),
REG64(VS_INVOCATION_COUNT),
REG64(GS_INVOCATION_COUNT),
REG64(GS_PRIMITIVES_COUNT),
REG64(CL_INVOCATION_COUNT),
REG64(CL_PRIMITIVES_COUNT),
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
OACONTROL, /* Only allowed for LRI and SRM. See below. */
GEN7_3DPRIM_END_OFFSET,
GEN7_3DPRIM_START_VERTEX,
GEN7_3DPRIM_VERTEX_COUNT,
GEN7_3DPRIM_INSTANCE_COUNT,
GEN7_3DPRIM_START_INSTANCE,
GEN7_3DPRIM_BASE_VERTEX,
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
GEN7_SO_WRITE_OFFSET(0),
GEN7_SO_WRITE_OFFSET(1),
GEN7_SO_WRITE_OFFSET(2),
GEN7_SO_WRITE_OFFSET(3),
};
static const u32 gen7_blt_regs[] = {
BCS_SWCTRL,
};
static const u32 ivb_master_regs[] = {
FORCEWAKE_MT,
DERRMR,
GEN7_PIPE_DE_LOAD_SL(PIPE_A),
GEN7_PIPE_DE_LOAD_SL(PIPE_B),
GEN7_PIPE_DE_LOAD_SL(PIPE_C),
};
static const u32 hsw_master_regs[] = {
FORCEWAKE_MT,
DERRMR,
};
#undef REG64
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
@ -137,15 +498,18 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
static void validate_cmds_sorted(struct intel_ring_buffer *ring)
static bool validate_cmds_sorted(struct intel_engine_cs *ring,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
int i;
bool ret = true;
if (!ring->cmd_tables || ring->cmd_table_count == 0)
return;
if (!cmd_tables || cmd_table_count == 0)
return true;
for (i = 0; i < ring->cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
for (i = 0; i < cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &cmd_tables[i];
u32 previous = 0;
int j;
@ -154,35 +518,107 @@ static void validate_cmds_sorted(struct intel_ring_buffer *ring)
&table->table[i];
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous)
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
ring->id, i, j, curr, previous);
ret = false;
}
previous = curr;
}
}
return ret;
}
static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
{
int i;
u32 previous = 0;
bool ret = true;
for (i = 0; i < reg_count; i++) {
u32 curr = reg_table[i];
if (curr < previous)
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
ring_id, i, curr, previous);
ret = false;
}
previous = curr;
}
return ret;
}
static void validate_regs_sorted(struct intel_ring_buffer *ring)
static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
check_sorted(ring->id, ring->reg_table, ring->reg_count);
check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
check_sorted(ring->id, ring->master_reg_table,
ring->master_reg_count);
}
struct cmd_node {
const struct drm_i915_cmd_descriptor *desc;
struct hlist_node node;
};
/*
* Different command ranges have different numbers of bits for the opcode. For
* example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
* problem is that, for example, MI commands use bits 22:16 for other fields
* such as GGTT vs PPGTT bits. If we include those bits in the mask then when
* we mask a command from a batch it could hash to the wrong bucket due to
* non-opcode bits being set. But if we don't include those bits, some 3D
* commands may hash to the same bucket due to not including opcode bits that
* make the command unique. For now, we will risk hashing to the same bucket.
*
* If we attempt to generate a perfect hash, we should be able to look at bits
* 31:29 of a command from a batch buffer and use the full mask for that
* client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
*/
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
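
A small numeric illustration of the trade-off described above (the header values here are made up): CMD_HASH_MASK keeps only bits 31:23, so length variants of one MI opcode share a bucket, while 3D opcodes that differ only in bits 22:16 will collide.

	/*
	 * 0x13000001 & 0xFF800000 == 0x13000000
	 * 0x130000ff & 0xFF800000 == 0x13000000   (same bucket, same MI opcode)
	 * 0x7b001234 & 0xFF800000 == 0x7b000000
	 * 0x7b051234 & 0xFF800000 == 0x7b000000   (same bucket, different 3D opcode)
	 */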
static int init_hash_table(struct intel_engine_cs *ring,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
int i, j;
hash_init(ring->cmd_hash);
for (i = 0; i < cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &cmd_tables[i];
for (j = 0; j < table->count; j++) {
const struct drm_i915_cmd_descriptor *desc =
&table->table[j];
struct cmd_node *desc_node =
kmalloc(sizeof(*desc_node), GFP_KERNEL);
if (!desc_node)
return -ENOMEM;
desc_node->desc = desc;
hash_add(ring->cmd_hash, &desc_node->node,
desc->cmd.value & CMD_HASH_MASK);
}
}
return 0;
}
static void fini_hash_table(struct intel_engine_cs *ring)
{
struct hlist_node *tmp;
struct cmd_node *desc_node;
int i;
hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
hash_del(&desc_node->node);
kfree(desc_node);
}
}
/**
@ -190,25 +626,74 @@ static void validate_regs_sorted(struct intel_ring_buffer *ring)
* @ring: the ringbuffer to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
* struct intel_ring_buffer based on whether the platform requires software
* struct intel_engine_cs based on whether the platform requires software
* command parsing.
*
* Return: non-zero if initialization fails
*/
void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;
int ret;
if (!IS_GEN7(ring->dev))
return;
return 0;
switch (ring->id) {
case RCS:
if (IS_HASWELL(ring->dev)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
} else {
cmd_tables = gen7_render_cmds;
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
ring->reg_table = gen7_render_regs;
ring->reg_count = ARRAY_SIZE(gen7_render_regs);
if (IS_HASWELL(ring->dev)) {
ring->master_reg_table = hsw_master_regs;
ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
} else {
ring->master_reg_table = ivb_master_regs;
ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
}
ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
if (IS_HASWELL(ring->dev)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
cmd_tables = gen7_blt_cmds;
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
ring->reg_table = gen7_blt_regs;
ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
if (IS_HASWELL(ring->dev)) {
ring->master_reg_table = hsw_master_regs;
ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
} else {
ring->master_reg_table = ivb_master_regs;
ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
}
ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
@ -218,18 +703,45 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
BUG();
}
validate_cmds_sorted(ring);
validate_regs_sorted(ring);
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
if (ret) {
DRM_ERROR("CMD: cmd_parser_init failed!\n");
fini_hash_table(ring);
return ret;
}
ring->needs_cmd_parser = true;
return 0;
}
/**
* i915_cmd_parser_fini_ring() - clean up cmd parser related fields
* @ring: the ringbuffer to clean up
*
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
*/
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
if (!ring->needs_cmd_parser)
return;
fini_hash_table(ring);
}
static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(const struct drm_i915_cmd_table *table,
find_cmd_in_table(struct intel_engine_cs *ring,
u32 cmd_header)
{
int i;
struct cmd_node *desc_node;
for (i = 0; i < table->count; i++) {
const struct drm_i915_cmd_descriptor *desc = &table->table[i];
hash_for_each_possible(ring->cmd_hash, desc_node, node,
cmd_header & CMD_HASH_MASK) {
const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
u32 masked_cmd = desc->cmd.mask & cmd_header;
u32 masked_value = desc->cmd.value & desc->cmd.mask;
@ -249,20 +761,16 @@ find_cmd_in_table(const struct drm_i915_cmd_table *table,
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_ring_buffer *ring,
find_cmd(struct intel_engine_cs *ring,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
const struct drm_i915_cmd_descriptor *desc;
u32 mask;
int i;
for (i = 0; i < ring->cmd_table_count; i++) {
const struct drm_i915_cmd_descriptor *desc;
desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
if (desc)
return desc;
}
desc = find_cmd_in_table(ring, cmd_header);
if (desc)
return desc;
mask = ring->get_cmd_length_mask(cmd_header);
if (!mask)
@ -329,15 +837,112 @@ finish:
*
* Return: true if the ring requires software command parsing
*/
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
/* No command tables indicates a platform without parsing */
if (!ring->cmd_tables)
struct drm_i915_private *dev_priv = ring->dev->dev_private;
if (!ring->needs_cmd_parser)
return false;
/*
* XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
* disabled. That will cause all of the parser's PPGTT checks to
* fail. For now, disable parsing when PPGTT is off.
*/
if (!dev_priv->mm.aliasing_ppgtt)
return false;
return (i915.enable_cmd_parser == 1);
}
static bool check_cmd(const struct intel_engine_cs *ring,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd,
const bool is_master,
bool *oacontrol_set)
{
if (desc->flags & CMD_DESC_REJECT) {
DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
return false;
}
if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
*cmd);
return false;
}
if (desc->flags & CMD_DESC_REGISTER) {
u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
/*
* OACONTROL requires some special handling for writes. We
* want to make sure that any batch which enables OA also
* disables it before the end of the batch. The goal is to
* prevent one process from snooping on the perf data from
* another process. To do that, we need to check the value
* that will be written to the register. Hence, limit
* OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
return false;
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[2] != 0);
}
if (!valid_reg(ring->reg_table,
ring->reg_count, reg_addr)) {
if (!is_master ||
!valid_reg(ring->master_reg_table,
ring->master_reg_count,
reg_addr)) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr,
*cmd,
ring->id);
return false;
}
}
}
if (desc->flags & CMD_DESC_BITMASK) {
int i;
for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
u32 dword;
if (desc->bits[i].mask == 0)
break;
if (desc->bits[i].condition_mask != 0) {
u32 offset =
desc->bits[i].condition_offset;
u32 condition = cmd[offset] &
desc->bits[i].condition_mask;
if (condition == 0)
continue;
}
dword = cmd[desc->bits[i].offset] &
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
dword, ring->id);
return false;
}
}
}
return true;
}
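
Put compactly, the CMD_DESC_BITMASK loop above enforces the following rule for every populated desc->bits[i] (restated here for readability, not new logic):

	/*
	 * if (bits[i].condition_mask != 0 &&
	 *     (cmd[bits[i].condition_offset] & bits[i].condition_mask) == 0)
	 *         the rule is skipped;
	 * otherwise
	 *         (cmd[bits[i].offset] & bits[i].mask) must equal bits[i].expected,
	 *         or the whole command is rejected.
	 */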
#define LENGTH_BIAS 2
/**
@ -352,7 +957,7 @@ bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
*
* Return: non-zero if the parser finds violations or otherwise fails
*/
int i915_parse_cmds(struct intel_ring_buffer *ring,
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master)
@ -361,6 +966,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
int needs_clflush = 0;
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
if (ret) {
@ -402,76 +1008,27 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
if ((batch_end - cmd) < length) {
DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
*cmd,
length,
(unsigned long)(batch_end - cmd));
batch_end - cmd);
ret = -EINVAL;
break;
}
if (desc->flags & CMD_DESC_REJECT) {
DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
ret = -EINVAL;
break;
}
if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
*cmd);
ret = -EINVAL;
break;
}
if (desc->flags & CMD_DESC_REGISTER) {
u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
if (!valid_reg(ring->reg_table,
ring->reg_count, reg_addr)) {
if (!is_master ||
!valid_reg(ring->master_reg_table,
ring->master_reg_count,
reg_addr)) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr,
*cmd,
ring->id);
ret = -EINVAL;
break;
}
}
}
if (desc->flags & CMD_DESC_BITMASK) {
int i;
for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
u32 dword;
if (desc->bits[i].mask == 0)
break;
dword = cmd[desc->bits[i].offset] &
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
dword, ring->id);
ret = -EINVAL;
break;
}
}
if (ret)
break;
}
cmd += length;
}
if (oacontrol_set) {
DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
ret = -EINVAL;
}
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
@ -483,3 +1040,22 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
return ret;
}
/**
* i915_cmd_parser_get_version() - get the cmd parser version number
*
* The cmd parser maintains a simple increasing integer version number suitable
* for passing to userspace clients to determine what operations are permitted.
*
* Return: the current version number of the cmd parser
*/
int i915_cmd_parser_get_version(void)
{
/*
* Command parser version history
*
* 1. Initial version. Checks batches and reports violations, but leaves
* hardware parsing enabled (so does not allow new use cases).
*/
return 1;
}
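
For context, userspace would normally read this number through the i915 GETPARAM ioctl rather than through anything defined in this file; a minimal sketch under that assumption (the parameter name and libdrm usage are not part of this diff):

	#include <string.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Hypothetical helper, for illustration only. */
	static int query_cmd_parser_version(int drm_fd)
	{
		drm_i915_getparam_t gp;
		int version = 0;

		memset(&gp, 0, sizeof(gp));
		gp.param = I915_PARAM_CMD_PARSER_VERSION;	/* assumed uapi name */
		gp.value = &version;

		if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;		/* kernel too old or query failed */

		return version;			/* 1 == initial parser version */
	}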

View File

@ -79,7 +79,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
const struct intel_device_info *info = INTEL_INFO(dev);
@ -172,7 +172,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", obj->ring->name);
}
static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
seq_putc(m, ctx->is_initialized ? 'I' : 'i');
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
@ -181,7 +181,7 @@ static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
@ -239,7 +239,7 @@ static int obj_rank_by_stolen(void *priv,
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
@ -371,7 +371,7 @@ static int per_file_stats(int id, void *ptr, void *data)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 count, mappable_count, purgeable_count;
@ -474,7 +474,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -509,12 +509,12 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
unsigned long flags;
struct intel_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
@ -559,10 +559,10 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
struct drm_i915_gem_request *gem_request;
int ret, count, i;
@ -594,7 +594,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
}
static void i915_ring_seqno_info(struct seq_file *m,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %u\n",
@ -604,10 +604,10 @@ static void i915_ring_seqno_info(struct seq_file *m,
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@ -627,10 +627,10 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@ -638,7 +638,47 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
return ret;
intel_runtime_pm_get(dev_priv);
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev)) {
int i;
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
seq_printf(m, "Display IIR:\t%08x\n",
I915_READ(VLV_IIR));
seq_printf(m, "Display IIR_RW:\t%08x\n",
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
for_each_pipe(pipe)
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
i, I915_READ(GEN8_GT_IMR(i)));
seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
i, I915_READ(GEN8_GT_IIR(i)));
seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
i, I915_READ(GEN8_GT_IER(i)));
}
seq_printf(m, "PCU interrupt mask:\t%08x\n",
I915_READ(GEN8_PCU_IMR));
seq_printf(m, "PCU interrupt identity:\t%08x\n",
I915_READ(GEN8_PCU_IIR));
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
} else if (INTEL_INFO(dev)->gen >= 8) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@ -768,7 +808,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
@ -797,10 +837,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static int i915_hws_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
const u32 *hws;
int i;
@ -945,7 +985,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 crstanddelay;
@ -966,9 +1006,9 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
return 0;
}
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
@ -991,6 +1031,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
@ -1011,6 +1052,10 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
reqf >>= 25;
reqf *= GT_FREQUENCY_MULTIPLIER;
rpmodectl = I915_READ(GEN6_RP_CONTROL);
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
@ -1027,14 +1072,23 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
I915_READ(GEN6_PMIER),
I915_READ(GEN6_PMIMR),
I915_READ(GEN6_PMISR),
I915_READ(GEN6_PMIIR),
I915_READ(GEN6_PMINTRMSK));
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
@ -1094,7 +1148,7 @@ out:
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 delayfreq;
@ -1125,7 +1179,7 @@ static inline int MAP_TO_MV(int map)
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 inttoext;
@ -1149,7 +1203,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl, rstdbyctl;
@ -1219,15 +1273,19 @@ static int ironlake_drpc_info(struct seq_file *m)
static int vlv_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, rcctl1;
unsigned fw_rendercount = 0, fw_mediacount = 0;
intel_runtime_pm_get(dev_priv);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
intel_runtime_pm_put(dev_priv);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "Turbo enabled: %s\n",
@ -1247,6 +1305,11 @@ static int vlv_drpc_info(struct seq_file *m)
(I915_READ(VLV_GTLC_PW_STATUS) &
VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Render RC6 residency since boot: %u\n",
I915_READ(VLV_GT_RENDER_RC6));
seq_printf(m, "Media RC6 residency since boot: %u\n",
I915_READ(VLV_GT_MEDIA_RC6));
spin_lock_irq(&dev_priv->uncore.lock);
fw_rendercount = dev_priv->uncore.fw_rendercount;
fw_mediacount = dev_priv->uncore.fw_mediacount;
@ -1263,7 +1326,7 @@ static int vlv_drpc_info(struct seq_file *m)
static int gen6_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
@ -1362,7 +1425,7 @@ static int gen6_drpc_info(struct seq_file *m)
static int i915_drpc_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
if (IS_VALLEYVIEW(dev))
@ -1375,7 +1438,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1437,7 +1500,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1460,7 +1523,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool sr_enabled = false;
@ -1486,7 +1549,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_emon_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
@ -1514,7 +1577,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
@ -1557,7 +1620,7 @@ out:
static int i915_gfxec(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@ -1577,7 +1640,7 @@ static int i915_gfxec(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
@ -1605,7 +1668,7 @@ out:
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct intel_fbdev *ifbdev = NULL;
struct intel_framebuffer *fb;
@ -1651,11 +1714,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct i915_hw_context *ctx;
struct intel_engine_cs *ring;
struct intel_context *ctx;
int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@ -1675,6 +1738,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
if (ctx->obj == NULL)
continue;
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
for_each_ring(ring, dev_priv, i)
@ -1692,7 +1758,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
@ -1740,7 +1806,7 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@ -1788,10 +1854,14 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
static int per_file_ctx(int id, void *ptr, void *data)
{
struct i915_hw_context *ctx = ptr;
struct intel_context *ctx = ptr;
struct seq_file *m = data;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
if (i915_gem_context_is_default(ctx))
seq_puts(m, " default context:\n");
else
seq_printf(m, " context %d:\n", ctx->id);
ppgtt->debug_dump(ppgtt, m);
return 0;
@ -1800,7 +1870,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int unused, i;
@ -1816,8 +1886,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
u64 pdp = I915_READ(ring->mmio_base + offset + 4);
pdp <<= 32;
pdp |= I915_READ(ring->mmio_base + offset);
for (i = 0; i < 4; i++)
seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
}
}
}
@ -1825,7 +1894,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
struct drm_file *file;
int i;
@ -1852,12 +1921,9 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_ppgtt *pvt_ppgtt;
pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
seq_printf(m, "proc: %s\n",
get_pid_task(file->pid, PIDTYPE_PID)->comm);
seq_puts(m, " default context:\n");
idr_for_each(&file_priv->context_idr, per_file_ctx, m);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@ -1865,7 +1931,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1885,56 +1951,9 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
return 0;
}
static int i915_dpio_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (!IS_VALLEYVIEW(dev)) {
seq_puts(m, "unsupported\n");
return 0;
}
ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
if (ret)
return ret;
seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -2040,11 +2059,11 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
static int i915_pc8_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_HASWELL(dev)) {
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
seq_puts(m, "not supported\n");
return 0;
}
@ -2115,7 +2134,7 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
@ -2170,7 +2189,7 @@ static void intel_encoder_info(struct seq_file *m,
struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_connector *intel_connector;
@ -2178,12 +2197,12 @@ static void intel_encoder_info(struct seq_file *m,
encoder = &intel_encoder->base;
seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
encoder->base.id, drm_get_encoder_name(encoder));
encoder->base.id, encoder->name);
for_each_connector_on_encoder(dev, encoder, intel_connector) {
struct drm_connector *connector = &intel_connector->base;
seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
connector->base.id,
drm_get_connector_name(connector),
connector->name,
drm_get_connector_status_name(connector->status));
if (connector->status == connector_status_connected) {
struct drm_display_mode *mode = &crtc->mode;
@ -2197,7 +2216,7 @@ static void intel_encoder_info(struct seq_file *m,
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
@ -2254,7 +2273,7 @@ static void intel_connector_info(struct seq_file *m,
struct drm_display_mode *mode;
seq_printf(m, "connector %d: type %s, status: %s\n",
connector->base.id, drm_get_connector_name(connector),
connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
if (connector->status == connector_status_connected) {
seq_printf(m, "\tname: %s\n", connector->display_info.name);
@ -2286,10 +2305,8 @@ static bool cursor_active(struct drm_device *dev, int pipe)
if (IS_845G(dev) || IS_I865G(dev))
state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
else
state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
return state;
}
@ -2299,10 +2316,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pos;
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
pos = I915_READ(CURPOS_IVB(pipe));
else
pos = I915_READ(CURPOS(pipe));
pos = I915_READ(CURPOS(pipe));
*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
@ -2317,7 +2331,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
static int i915_display_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
@ -2327,7 +2341,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_modeset_lock_all(dev);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
for_each_intel_crtc(dev, crtc) {
bool active;
int x, y;
@ -2339,10 +2353,14 @@ static int i915_display_info(struct seq_file *m, void *unused)
active = cursor_position(dev, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
yesno(crtc->cursor_visible),
yesno(crtc->cursor_base),
x, y, crtc->cursor_addr,
yesno(active));
}
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
yesno(!crtc->cpu_fifo_underrun_disabled),
yesno(!crtc->pch_fifo_underrun_disabled));
}
seq_printf(m, "\n");
@ -2595,7 +2613,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (!encoder->base.crtc)
@ -2631,7 +2649,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
break;
}
}
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
@ -3106,7 +3124,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
{
struct drm_device *dev = m->private;
int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
int num_levels = ilk_wm_max_level(dev) + 1;
int level;
drm_modeset_lock_all(dev);
@ -3189,7 +3207,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
uint16_t new[5] = { 0 };
int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
int num_levels = ilk_wm_max_level(dev) + 1;
int level;
int ret;
char tmp[32];
@ -3286,9 +3304,15 @@ static int
i915_wedged_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_runtime_pm_get(dev_priv);
i915_handle_error(dev, val,
"Manually setting wedged to %llu", val);
intel_runtime_pm_put(dev_priv);
return 0;
}
@ -3774,7 +3798,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_delayfreq_table", i915_delayfreq_table, 0},
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
@ -3790,7 +3814,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_dpio", i915_dpio_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_sink_crc_eDP1", i915_sink_crc, 0},

View File

@ -44,6 +44,7 @@
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
@ -63,7 +64,7 @@
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
if (LP_RING(dev->dev_private)->obj == NULL) \
if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
@ -119,7 +120,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
static void i915_free_hws(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct intel_engine_cs *ring = LP_RING(dev_priv);
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
@ -139,7 +140,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct intel_engine_cs *ring = LP_RING(dev_priv);
struct intel_ringbuffer *ringbuf = ring->buffer;
/*
* We should never lose context on the ring with modesetting
@ -148,17 +150,17 @@ void i915_kernel_lost_context(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
if (ringbuf->space < 0)
ringbuf->space += ringbuf->size;
if (!dev->primary->master)
return;
master_priv = dev->primary->master->driver_priv;
if (ring->head == ring->tail && master_priv->sarea_priv)
if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
@ -201,7 +203,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
if (LP_RING(dev_priv)->obj != NULL) {
if (LP_RING(dev_priv)->buffer->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
@ -234,11 +236,11 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct intel_engine_cs *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("%s\n", __func__);
if (ring->virtual_start == NULL) {
if (ring->buffer->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
@ -360,7 +362,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
return -EINVAL;
for (i = 0; i < dwords;) {
@ -782,7 +784,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct intel_engine_cs *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@ -823,7 +825,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -1017,6 +1019,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
value = 1;
break;
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version();
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@ -1070,7 +1075,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@ -1277,12 +1282,13 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
bool can_switch;
spin_lock(&dev->count_lock);
can_switch = (dev->open_count == 0);
spin_unlock(&dev->count_lock);
return can_switch;
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
return dev->open_count == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
@ -1326,7 +1332,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_power_domains_init_hw(dev_priv);
ret = drm_irq_install(dev);
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_gem_stolen;
@ -1336,7 +1342,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init(dev);
if (ret)
goto cleanup_power;
goto cleanup_irq;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
@ -1345,10 +1351,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0) {
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
}
ret = intel_fbdev_init(dev);
if (ret)
@ -1383,8 +1387,7 @@ cleanup_gem:
mutex_unlock(&dev->struct_mutex);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
@ -1739,8 +1742,8 @@ out_power_well:
intel_power_domains_remove(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
@ -1791,8 +1794,8 @@ int i915_driver_unload(struct drm_device *dev)
i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@ -1864,7 +1867,7 @@ int i915_driver_unload(struct drm_device *dev)
kmem_cache_destroy(dev_priv->slab);
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
kfree(dev_priv);
return 0;
}
@ -1925,6 +1928,8 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
if (file_priv && file_priv->bsd_ring)
file_priv->bsd_ring = NULL;
kfree(file_priv);
}
@ -1978,9 +1983,10 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
/*
* This is really ugly: Because old userspace abused the linux agp interface to

Some files were not shown because too many files have changed in this diff.