Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon: switch to using late_initcall
  radeon legacy chips: tv dac bg/dac adj updates
  drm/radeon: introduce kernel modesetting for radeon hardware
  drm: Add the TTM GPU memory manager subsystem.
  drm: Memory fragmentation from lost alignment blocks
  drm/radeon: fix mobility flags on new PCI IDs.
This commit is contained in: commit 8d15b0ec32
drivers/gpu/drm/Kconfig
@@ -18,6 +18,14 @@ menuconfig DRM
 	  details. You should also select and configure AGP
 	  (/dev/agpgart) support.
 
+config DRM_TTM
+	tristate
+	depends on DRM
+	help
+	  GPU memory management subsystem for devices with multiple
+	  GPU memory types. Will be enabled automatically if a device driver
+	  uses it.
+
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
@@ -36,6 +44,11 @@ config DRM_R128
 
 config DRM_RADEON
 	tristate "ATI Radeon"
 	depends on DRM && PCI
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select FB
+	select FRAMEBUFFER_CONSOLE if !EMBEDDED
 	help
 	  Choose this option if you have an ATI Radeon graphics card.  There
 	  are both PCI and AGP versions.  You don't need to choose this to
drivers/gpu/drm/Makefile
@@ -26,4 +26,4 @@ obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VIA)	+=via/
-
+obj-$(CONFIG_DRM_TTM) += ttm/
drivers/gpu/drm/drm_mm.c
@@ -188,36 +188,34 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 
 
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
 				     unsigned long size, unsigned alignment)
 {
 
 	struct drm_mm_node *align_splitoff = NULL;
-	struct drm_mm_node *child;
 	unsigned tmp = 0;
 
 	if (alignment)
-		tmp = parent->start % alignment;
+		tmp = node->start % alignment;
 
 	if (tmp) {
 		align_splitoff =
-		    drm_mm_split_at_start(parent, alignment - tmp, 0);
+		    drm_mm_split_at_start(node, alignment - tmp, 0);
 		if (unlikely(align_splitoff == NULL))
 			return NULL;
 	}
 
-	if (parent->size == size) {
-		list_del_init(&parent->fl_entry);
-		parent->free = 0;
-		return parent;
+	if (node->size == size) {
+		list_del_init(&node->fl_entry);
+		node->free = 0;
 	} else {
-		child = drm_mm_split_at_start(parent, size, 0);
+		node = drm_mm_split_at_start(node, size, 0);
 	}
 
 	if (align_splitoff)
 		drm_mm_put_block(align_splitoff);
 
-	return child;
+	return node;
 }
 
 EXPORT_SYMBOL(drm_mm_get_block);
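The hunk above is the "drm: Memory fragmentation from lost alignment blocks" part of the merge: in the old code, the exact-fit path returned early and skipped the drm_mm_put_block() call, leaking the piece split off to satisfy the alignment; the fix unifies the paths so the split-off always goes back to the free list. A standalone toy model of the split arithmetic, with made-up sizes, shows what would otherwise be lost:

#include <stdio.h>

int main(void)
{
	unsigned long start = 12288, size = 131072;   /* a free block */
	unsigned long want = 4096, alignment = 65536; /* the request  */
	unsigned long tmp = alignment ? start % alignment : 0;

	if (tmp) {
		/* The leading (alignment - tmp) bytes are split off so the
		 * allocation can start on an alignment boundary. The old
		 * exact-fit early return skipped drm_mm_put_block() and
		 * leaked this piece; the fix hands it back every time. */
		printf("split-off: %lu bytes at %lu\n", alignment - tmp, start);
		start += alignment - tmp;
		size -= alignment - tmp;
	}
	printf("allocated: %lu bytes at %lu (remaining free: %lu)\n",
	       want, start, size - want);
	return 0;
}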
drivers/gpu/drm/radeon/Kconfig
@@ -0,0 +1,34 @@
config DRM_RADEON_KMS
	bool "Enable modesetting on radeon by default"
	depends on DRM_RADEON
	select DRM_TTM
	help
	  Choose this option if you want kernel modesetting enabled by default
	  and you have a new enough userspace to support this. Running old
	  userspaces with this enabled will cause pain.

	  When kernel modesetting is enabled, the old ioctls of the radeon/drm
	  driver are considered invalid: they fail, and an error message is
	  printed in the log.

	  KMS-enabled userspace uses a new API to talk to the radeon/drm
	  driver. The new API provides functions to create, destroy, share and
	  mmap buffer objects, which are then managed by the kernel memory
	  manager (here TTM). To submit commands to the GPU, userspace
	  provides a buffer holding the command stream and, along with it, a
	  list of the buffer objects used by the command stream. The kernel
	  radeon driver then places those buffers in GPU-accessible memory
	  and updates the command stream to reflect their positions.

	  The kernel also performs security checks on the command stream
	  supplied by the user: we want to catch and forbid any illegal use
	  of the GPU, such as DMA into random system memory or into memory
	  not owned by the process submitting the command stream. This part
	  of the code is still incomplete, which is why we propose this patch
	  as a staging driver addition; future security checks might forbid
	  current experimental userspace from running.

	  This code supports the following hardware: R1XX, R2XX, R3XX, R4XX
	  and R5XX (radeons up to X1950). Work is underway to support R6XX,
	  R7XX and newer hardware (radeons from HD2XXX to HD4XXX).
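The "update the command stream" step the help text describes can be pictured with a small self-contained toy: userspace emits buffer-object handles as placeholders, and the kernel, after placing each buffer, rewrites those dwords with real GPU addresses. The opcodes, handles and addresses here are invented for illustration, not the radeon command format:

#include <stdint.h>
#include <stdio.h>

struct bo { uint32_t handle; uint32_t gpu_addr; };

int main(void)
{
	/* buffers the kernel has placed in GPU-accessible memory */
	struct bo placed[] = { { 1, 0x00100000 }, { 2, 0x00400000 } };
	/* (opcode, handle) pairs as submitted by userspace */
	uint32_t stream[] = { 0xC0DE0001, 1, 0xC0DE0002, 2 };
	int i, j;

	for (i = 1; i < 4; i += 2)	/* patch the handle slots */
		for (j = 0; j < 2; j++)
			if (stream[i] == placed[j].handle) {
				stream[i] = placed[j].gpu_addr;
				printf("dword %d patched to 0x%08X\n",
				       i, (unsigned)stream[i]);
				break;
			}
	return 0;
}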
drivers/gpu/drm/radeon/Makefile
@@ -3,7 +3,17 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
+	radeon_irq.o r300_cmdbuf.o r600_cp.o
+
+radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
+	radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
+	atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
+	radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
+	radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
+	radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
+	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
+	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
drivers/gpu/drm/radeon/ObjectID.h
@@ -0,0 +1,578 @@
/*
 * Copyright 2006-2007 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */

#ifndef _OBJECTID_H
#define _OBJECTID_H

#if defined(_X86_)
#pragma pack(1)
#endif

/****************************************************/
/* Graphics Object Type Definition                  */
/****************************************************/
#define GRAPH_OBJECT_TYPE_NONE			0x0
#define GRAPH_OBJECT_TYPE_GPU			0x1
#define GRAPH_OBJECT_TYPE_ENCODER		0x2
#define GRAPH_OBJECT_TYPE_CONNECTOR		0x3
#define GRAPH_OBJECT_TYPE_ROUTER		0x4
/* deleted */

/****************************************************/
/* Encoder Object ID Definition                     */
/****************************************************/
#define ENCODER_OBJECT_ID_NONE			0x00

/* Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_INTERNAL_LVDS		0x01
#define ENCODER_OBJECT_ID_INTERNAL_TMDS1	0x02
#define ENCODER_OBJECT_ID_INTERNAL_TMDS2	0x03
#define ENCODER_OBJECT_ID_INTERNAL_DAC1		0x04
#define ENCODER_OBJECT_ID_INTERNAL_DAC2		0x05 /* TV/CV DAC */
#define ENCODER_OBJECT_ID_INTERNAL_SDVOA	0x06
#define ENCODER_OBJECT_ID_INTERNAL_SDVOB	0x07

/* External Third Party Encoders */
#define ENCODER_OBJECT_ID_SI170B		0x08
#define ENCODER_OBJECT_ID_CH7303		0x09
#define ENCODER_OBJECT_ID_CH7301		0x0A
#define ENCODER_OBJECT_ID_INTERNAL_DVO1		0x0B /* This belongs to Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA	0x0C
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB	0x0D
#define ENCODER_OBJECT_ID_TITFP513		0x0E
#define ENCODER_OBJECT_ID_INTERNAL_LVTM1	0x0F /* not used for Radeon */
#define ENCODER_OBJECT_ID_VT1623		0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930		0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL		0x12
/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1	0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1	0x14
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1	0x15
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2	0x16 /* Shared with CV/TV and CRT */
#define ENCODER_OBJECT_ID_SI178			0x17 /* External TMDS (dual link, no HDCP.) */
#define ENCODER_OBJECT_ID_MVPU_FPGA		0x18 /* MVPU FPGA chip */
#define ENCODER_OBJECT_ID_INTERNAL_DDI		0x19
#define ENCODER_OBJECT_ID_VT1625		0x1A
#define ENCODER_OBJECT_ID_HDMI_SI1932		0x1B
#define ENCODER_OBJECT_ID_DP_AN9801		0x1C
#define ENCODER_OBJECT_ID_DP_DP501		0x1D
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY	0x1E
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA	0x1F
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1	0x20
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2	0x21

#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO	0xFF

/****************************************************/
/* Connector Object ID Definition                   */
/****************************************************/
#define CONNECTOR_OBJECT_ID_NONE		0x00
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I	0x01
#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I	0x02
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D	0x03
#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D	0x04
#define CONNECTOR_OBJECT_ID_VGA			0x05
#define CONNECTOR_OBJECT_ID_COMPOSITE		0x06
#define CONNECTOR_OBJECT_ID_SVIDEO		0x07
#define CONNECTOR_OBJECT_ID_YPbPr		0x08
#define CONNECTOR_OBJECT_ID_D_CONNECTOR		0x09
#define CONNECTOR_OBJECT_ID_9PIN_DIN		0x0A /* Supports both CV & TV */
#define CONNECTOR_OBJECT_ID_SCART		0x0B
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A		0x0C
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B		0x0D
#define CONNECTOR_OBJECT_ID_LVDS		0x0E
#define CONNECTOR_OBJECT_ID_7PIN_DIN		0x0F
#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR	0x10
#define CONNECTOR_OBJECT_ID_CROSSFIRE		0x11
#define CONNECTOR_OBJECT_ID_HARDCODE_DVI	0x12
#define CONNECTOR_OBJECT_ID_DISPLAYPORT		0x13

/* deleted */

/****************************************************/
/* Router Object ID Definition                      */
/****************************************************/
#define ROUTER_OBJECT_ID_NONE			0x00
#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL	0x01

/****************************************************/
/* Graphics Object ENUM ID Definition               */
/****************************************************/
#define GRAPH_OBJECT_ENUM_ID1			0x01
#define GRAPH_OBJECT_ENUM_ID2			0x02
#define GRAPH_OBJECT_ENUM_ID3			0x03
#define GRAPH_OBJECT_ENUM_ID4			0x04
#define GRAPH_OBJECT_ENUM_ID5			0x05
#define GRAPH_OBJECT_ENUM_ID6			0x06

/****************************************************/
/* Graphics Object ID Bit definition                */
/****************************************************/
#define OBJECT_ID_MASK				0x00FF
#define ENUM_ID_MASK				0x0700
#define RESERVED1_ID_MASK			0x0800
#define OBJECT_TYPE_MASK			0x7000
#define RESERVED2_ID_MASK			0x8000

#define OBJECT_ID_SHIFT				0x00
#define ENUM_ID_SHIFT				0x08
#define OBJECT_TYPE_SHIFT			0x0C

/****************************************************/
/* Graphics Object family definition                */
/****************************************************/
#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
	(GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
	 GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
/****************************************************/
/* GPU Object ID definition - Shared with BIOS      */
/****************************************************/
#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
		      GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)

/****************************************************/
/* Encoder Object ID definition - Shared with BIOS  */
/****************************************************/
/*
#define ENCODER_INTERNAL_LVDS_ENUM_ID1            0x2101
#define ENCODER_INTERNAL_TMDS1_ENUM_ID1           0x2102
#define ENCODER_INTERNAL_TMDS2_ENUM_ID1           0x2103
#define ENCODER_INTERNAL_DAC1_ENUM_ID1            0x2104
#define ENCODER_INTERNAL_DAC2_ENUM_ID1            0x2105
#define ENCODER_INTERNAL_SDVOA_ENUM_ID1           0x2106
#define ENCODER_INTERNAL_SDVOB_ENUM_ID1           0x2107
#define ENCODER_SIL170B_ENUM_ID1                  0x2108
#define ENCODER_CH7303_ENUM_ID1                   0x2109
#define ENCODER_CH7301_ENUM_ID1                   0x210A
#define ENCODER_INTERNAL_DVO1_ENUM_ID1            0x210B
#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1           0x210C
#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1           0x210D
#define ENCODER_TITFP513_ENUM_ID1                 0x210E
#define ENCODER_INTERNAL_LVTM1_ENUM_ID1           0x210F
#define ENCODER_VT1623_ENUM_ID1                   0x2110
#define ENCODER_HDMI_SI1930_ENUM_ID1              0x2111
#define ENCODER_HDMI_INTERNAL_ENUM_ID1            0x2112
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1    0x2113
#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1     0x2114
#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1     0x2115
#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1     0x2116
#define ENCODER_SI178_ENUM_ID1                    0x2117
#define ENCODER_MVPU_FPGA_ENUM_ID1                0x2118
#define ENCODER_INTERNAL_DDI_ENUM_ID1             0x2119
#define ENCODER_VT1625_ENUM_ID1                   0x211A
#define ENCODER_HDMI_SI1932_ENUM_ID1              0x211B
#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1        0x211C
#define ENCODER_DP_DP501_ENUM_ID1                 0x211D
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1          0x211E
*/
#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)

#define ENCODER_SIL170B_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)

#define ENCODER_CH7303_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)

#define ENCODER_CH7301_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)

#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)

#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)

#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)

#define ENCODER_TITFP513_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)

#define ENCODER_VT1623_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)

#define ENCODER_HDMI_SI1930_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)

#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */

#define ENCODER_SI178_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)

#define ENCODER_MVPU_FPGA_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)

#define ENCODER_VT1625_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)

#define ENCODER_HDMI_SI1932_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)

#define ENCODER_DP_DP501_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)

#define ENCODER_DP_AN9801_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)

#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)

#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)

/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
/*
#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1      0x3101
#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1        0x3102
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1      0x3103
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1        0x3104
#define CONNECTOR_VGA_ENUM_ID1                    0x3105
#define CONNECTOR_COMPOSITE_ENUM_ID1              0x3106
#define CONNECTOR_SVIDEO_ENUM_ID1                 0x3107
#define CONNECTOR_YPbPr_ENUM_ID1                  0x3108
#define CONNECTOR_D_CONNECTORE_ENUM_ID1           0x3109
#define CONNECTOR_9PIN_DIN_ENUM_ID1               0x310A
#define CONNECTOR_SCART_ENUM_ID1                  0x310B
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1            0x310C
#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1            0x310D
#define CONNECTOR_LVDS_ENUM_ID1                   0x310E
#define CONNECTOR_7PIN_DIN_ENUM_ID1               0x310F
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1         0x3110
*/
#define CONNECTOR_LVDS_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)

#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)

#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)

#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)

#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)

#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)

#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)

#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)

#define CONNECTOR_VGA_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)

#define CONNECTOR_VGA_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)

#define CONNECTOR_COMPOSITE_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)

#define CONNECTOR_SVIDEO_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)

#define CONNECTOR_YPbPr_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)

#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)

#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)

#define CONNECTOR_SCART_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)

#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)

#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)

#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)

#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)

#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)

#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)

#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)

#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)

#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)

#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)

#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)

#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)

#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)

/****************************************************/
/* Router Object ID definition - Shared with BIOS   */
/****************************************************/
#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
	(GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
	 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)

/* deleted */

/****************************************************/
/* Object Cap definition - Shared with BIOS         */
/****************************************************/
#define GRAPHICS_OBJECT_CAP_I2C				0x00000001L
#define GRAPHICS_OBJECT_CAP_TABLE_ID			0x00000002L

#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID			0x01
#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID	0x02
#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID	0x03

#if defined(_X86_)
#pragma pack()
#endif

#endif /*GRAPHICTYPE */
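The masks and shifts above compose a 16-bit object ID from a type, an enum instance and an object number. A self-contained check in C, using the legacy value 0x310C listed for CONNECTOR_HDMI_TYPE_A_ENUM_ID1 in the commented-out table:

#include <stdint.h>
#include <stdio.h>

#define OBJECT_ID_MASK    0x00FF
#define ENUM_ID_MASK      0x0700
#define OBJECT_TYPE_MASK  0x7000
#define OBJECT_ID_SHIFT   0x00
#define ENUM_ID_SHIFT     0x08
#define OBJECT_TYPE_SHIFT 0x0C

int main(void)
{
	/* type=CONNECTOR (0x3), enum=ID1 (0x01), object=HDMI_TYPE_A (0x0C) */
	uint16_t id = (0x3 << OBJECT_TYPE_SHIFT) |
		      (0x01 << ENUM_ID_SHIFT) |
		      (0x0C << OBJECT_ID_SHIFT);

	/* prints id=0x310C type=0x3 enum=0x1 object=0x0C */
	printf("id=0x%04X type=0x%X enum=0x%X object=0x%02X\n",
	       (unsigned)id,
	       (unsigned)((id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT),
	       (unsigned)((id & ENUM_ID_MASK) >> ENUM_ID_SHIFT),
	       (unsigned)((id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT));
	return 0;
}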
drivers/gpu/drm/radeon/atom-bits.h
@@ -0,0 +1,48 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#ifndef ATOM_BITS_H
#define ATOM_BITS_H

static inline uint8_t get_u8(void *bios, int ptr)
{
	return ((unsigned char *)bios)[ptr];
}
#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
#define CU8(ptr) get_u8(ctx->bios, (ptr))
static inline uint16_t get_u16(void *bios, int ptr)
{
	return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
}
#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
#define CU16(ptr) get_u16(ctx->bios, (ptr))
static inline uint32_t get_u32(void *bios, int ptr)
{
	return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
}
#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
#define CU32(ptr) get_u32(ctx->bios, (ptr))
#define CSTR(ptr) (((char *)(ctx->bios)) + (ptr))

#endif
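The accessors in atom-bits.h assemble multi-byte AtomBIOS fields byte by byte, so the result is little-endian regardless of host byte order. A standalone exercise of the same three functions (note that the 0xAA55 read at offset 0 here is exactly ATOM_BIOS_MAGIC from atom.h; the remaining buffer bytes are made up):

#include <stdint.h>
#include <stdio.h>

static inline uint8_t get_u8(void *bios, int ptr)
{
	return ((unsigned char *)bios)[ptr];
}
static inline uint16_t get_u16(void *bios, int ptr)
{
	return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
}
static inline uint32_t get_u32(void *bios, int ptr)
{
	return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
}

int main(void)
{
	uint8_t bios[] = { 0x55, 0xAA, 0x0D, 0xF0, 0xAD, 0xDE };

	printf("u16 at 0: 0x%04X\n", (unsigned)get_u16(bios, 0)); /* 0xAA55 */
	printf("u32 at 2: 0x%08X\n", (unsigned)get_u32(bios, 2)); /* 0xDEADF00D */
	return 0;
}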
drivers/gpu/drm/radeon/atom-names.h
@@ -0,0 +1,100 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#ifndef ATOM_NAMES_H
#define ATOM_NAMES_H

#include "atom.h"

#ifdef ATOM_DEBUG

#define ATOM_OP_NAMES_CNT 123
static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
"DEBUG", "CTB_DS",
};

#define ATOM_TABLE_NAMES_CNT 74
static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
"MemoryDeviceInit", "EnableYUV",
};

#define ATOM_IO_NAMES_CNT 5
static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
"MM", "PLL", "MC", "PCIE", "PCIE PORT",
};

#else

#define ATOM_OP_NAMES_CNT 0
#define ATOM_TABLE_NAMES_CNT 0
#define ATOM_IO_NAMES_CNT 0

#endif

#endif
drivers/gpu/drm/radeon/atom-types.h
@@ -0,0 +1,42 @@
/*
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Dave Airlie
 */

#ifndef ATOM_TYPES_H
#define ATOM_TYPES_H

/* sync atom types to kernel types */

typedef uint16_t USHORT;
typedef uint32_t ULONG;
typedef uint8_t UCHAR;


#ifndef ATOM_BIG_ENDIAN
#if defined(__BIG_ENDIAN)
#define ATOM_BIG_ENDIAN 1
#else
#define ATOM_BIG_ENDIAN 0
#endif
#endif
#endif
File diff suppressed because it is too large.
drivers/gpu/drm/radeon/atom.h
@@ -0,0 +1,149 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#ifndef ATOM_H
#define ATOM_H

#include <linux/types.h>
#include "drmP.h"

#define ATOM_BIOS_MAGIC		0xAA55
#define ATOM_ATI_MAGIC_PTR	0x30
#define ATOM_ATI_MAGIC		" 761295520"
#define ATOM_ROM_TABLE_PTR	0x48

#define ATOM_ROM_MAGIC		"ATOM"
#define ATOM_ROM_MAGIC_PTR	4

#define ATOM_ROM_MSG_PTR	0x10
#define ATOM_ROM_CMD_PTR	0x1E
#define ATOM_ROM_DATA_PTR	0x20

#define ATOM_CMD_INIT		0
#define ATOM_CMD_SETSCLK	0x0A
#define ATOM_CMD_SETMCLK	0x0B
#define ATOM_CMD_SETPCLK	0x0C

#define ATOM_DATA_FWI_PTR	0xC
#define ATOM_DATA_IIO_PTR	0x32

#define ATOM_FWI_DEFSCLK_PTR	8
#define ATOM_FWI_DEFMCLK_PTR	0xC
#define ATOM_FWI_MAXSCLK_PTR	0x24
#define ATOM_FWI_MAXMCLK_PTR	0x28

#define ATOM_CT_SIZE_PTR	0
#define ATOM_CT_WS_PTR		4
#define ATOM_CT_PS_PTR		5
#define ATOM_CT_PS_MASK		0x7F
#define ATOM_CT_CODE_PTR	6

#define ATOM_OP_CNT		123
#define ATOM_OP_EOT		91

#define ATOM_CASE_MAGIC		0x63
#define ATOM_CASE_END		0x5A5A

#define ATOM_ARG_REG		0
#define ATOM_ARG_PS		1
#define ATOM_ARG_WS		2
#define ATOM_ARG_FB		3
#define ATOM_ARG_ID		4
#define ATOM_ARG_IMM		5
#define ATOM_ARG_PLL		6
#define ATOM_ARG_MC		7

#define ATOM_SRC_DWORD		0
#define ATOM_SRC_WORD0		1
#define ATOM_SRC_WORD8		2
#define ATOM_SRC_WORD16		3
#define ATOM_SRC_BYTE0		4
#define ATOM_SRC_BYTE8		5
#define ATOM_SRC_BYTE16		6
#define ATOM_SRC_BYTE24		7

#define ATOM_WS_QUOTIENT	0x40
#define ATOM_WS_REMAINDER	0x41
#define ATOM_WS_DATAPTR		0x42
#define ATOM_WS_SHIFT		0x43
#define ATOM_WS_OR_MASK		0x44
#define ATOM_WS_AND_MASK	0x45
#define ATOM_WS_FB_WINDOW	0x46
#define ATOM_WS_ATTRIBUTES	0x47

#define ATOM_IIO_NOP		0
#define ATOM_IIO_START		1
#define ATOM_IIO_READ		2
#define ATOM_IIO_WRITE		3
#define ATOM_IIO_CLEAR		4
#define ATOM_IIO_SET		5
#define ATOM_IIO_MOVE_INDEX	6
#define ATOM_IIO_MOVE_ATTR	7
#define ATOM_IIO_MOVE_DATA	8
#define ATOM_IIO_END		9

#define ATOM_IO_MM		0
#define ATOM_IO_PCI		1
#define ATOM_IO_SYSIO		2
#define ATOM_IO_IIO		0x80

struct card_info {
	struct drm_device *dev;
	void (*reg_write)(struct card_info *, uint32_t, uint32_t);  /* filled by driver */
	uint32_t (*reg_read)(struct card_info *, uint32_t);	    /* filled by driver */
	void (*mc_write)(struct card_info *, uint32_t, uint32_t);   /* filled by driver */
	uint32_t (*mc_read)(struct card_info *, uint32_t);	    /* filled by driver */
	void (*pll_write)(struct card_info *, uint32_t, uint32_t);  /* filled by driver */
	uint32_t (*pll_read)(struct card_info *, uint32_t);	    /* filled by driver */
};

struct atom_context {
	struct card_info *card;
	void *bios;
	uint32_t cmd_table, data_table;
	uint16_t *iio;

	uint16_t data_block;
	uint32_t fb_base;
	uint32_t divmul[2];
	uint16_t io_attr;
	uint16_t reg_block;
	uint8_t shift;
	int cs_equal, cs_above;
	int io_mode;
};

extern int atom_debug;

struct atom_context *atom_parse(struct card_info *, void *);
void atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
void atom_parse_data_header(struct atom_context *ctx, int index,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint16_t *data_start);
void atom_parse_cmd_header(struct atom_context *ctx, int index,
			   uint8_t *frev, uint8_t *crev);
#include "atom-types.h"
#include "atombios.h"
#include "ObjectID.h"

#endif
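Per the "filled by driver" comments above, the interpreter reaches hardware only through the callbacks in struct card_info. A self-contained userspace toy of that wiring pattern — the struct here is deliberately reduced (no dev or mc/pll hooks) and the array stands in for MMIO, so this is a sketch of the design, not the radeon driver's actual setup code:

#include <stdint.h>
#include <stdio.h>

struct card_info {
	uint32_t *regs;	/* stand-in for the MMIO aperture */
	void (*reg_write)(struct card_info *, uint32_t, uint32_t);
	uint32_t (*reg_read)(struct card_info *, uint32_t);
};

static void my_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	info->regs[reg] = val;	/* a real driver would do an MMIO write */
}

static uint32_t my_reg_read(struct card_info *info, uint32_t reg)
{
	return info->regs[reg];	/* a real driver would do an MMIO read */
}

int main(void)
{
	uint32_t fake_mmio[16] = { 0 };
	struct card_info card = {
		.regs = fake_mmio,
		.reg_write = my_reg_write,
		.reg_read = my_reg_read,
	};

	/* the interpreter core would make calls exactly like these */
	card.reg_write(&card, 3, 0xCAFE);
	printf("reg 3 = 0x%X\n", (unsigned)card.reg_read(&card, 3));
	return 0;
}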
File diff suppressed because it is too large.
@ -0,0 +1,695 @@
|
|||
/*
|
||||
* Copyright 2007-8 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/radeon_drm.h>
|
||||
#include "radeon_fixed.h"
|
||||
#include "radeon.h"
|
||||
#include "atom.h"
|
||||
#include "atom-bits.h"
|
||||
|
||||
static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
int index =
|
||||
GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
|
||||
ENABLE_CRTC_PS_ALLOCATION args;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
args.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.ucEnable = lock;
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
|
||||
ENABLE_CRTC_PS_ALLOCATION args;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
args.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.ucEnable = state;
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
|
||||
ENABLE_CRTC_PS_ALLOCATION args;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
args.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.ucEnable = state;
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
|
||||
BLANK_CRTC_PS_ALLOCATION args;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
args.ucCRTC = radeon_crtc->crtc_id;
|
||||
args.ucBlanking = state;
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
switch (mode) {
|
||||
case DRM_MODE_DPMS_ON:
|
||||
if (ASIC_IS_DCE3(rdev))
|
||||
atombios_enable_crtc_memreq(crtc, 1);
|
||||
atombios_enable_crtc(crtc, 1);
|
||||
atombios_blank_crtc(crtc, 0);
|
||||
break;
|
||||
case DRM_MODE_DPMS_STANDBY:
|
||||
case DRM_MODE_DPMS_SUSPEND:
|
||||
case DRM_MODE_DPMS_OFF:
|
||||
atombios_blank_crtc(crtc, 1);
|
||||
atombios_enable_crtc(crtc, 0);
|
||||
if (ASIC_IS_DCE3(rdev))
|
||||
atombios_enable_crtc_memreq(crtc, 0);
|
||||
break;
|
||||
}
|
||||
|
||||
if (mode != DRM_MODE_DPMS_OFF) {
|
||||
radeon_crtc_load_lut(crtc);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
|
||||
SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
|
||||
|
||||
conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
|
||||
conv_param.usH_Blanking_Time =
|
||||
cpu_to_le16(crtc_param->usH_Blanking_Time);
|
||||
conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
|
||||
conv_param.usV_Blanking_Time =
|
||||
cpu_to_le16(crtc_param->usV_Blanking_Time);
|
||||
conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
|
||||
conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
|
||||
conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
|
||||
conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
|
||||
conv_param.susModeMiscInfo.usAccess =
|
||||
cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
|
||||
conv_param.ucCRTC = crtc_param->ucCRTC;
|
||||
|
||||
printk("executing set crtc dtd timing\n");
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
|
||||
}
|
||||
|
||||
void atombios_crtc_set_timing(struct drm_crtc *crtc,
|
||||
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *
|
||||
crtc_param)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
|
||||
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
|
||||
|
||||
conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
|
||||
conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
|
||||
conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
|
||||
conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
|
||||
conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
|
||||
conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
|
||||
conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
|
||||
conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
|
||||
conv_param.susModeMiscInfo.usAccess =
|
||||
cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
|
||||
conv_param.ucCRTC = crtc_param->ucCRTC;
|
||||
conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
|
||||
conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
|
||||
conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
|
||||
conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
|
||||
conv_param.ucReserved = crtc_param->ucReserved;
|
||||
|
||||
printk("executing set crtc timing\n");
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
|
||||
}
|
||||
|
||||
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder = NULL;
	struct radeon_encoder *radeon_encoder = NULL;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	SET_PIXEL_CLOCK_PS_ALLOCATION args;
	PIXEL_CLOCK_PARAMETERS *spc1_ptr;
	PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
	PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
	uint32_t sclock = mode->clock;
	uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
	struct radeon_pll *pll;
	int pll_flags = 0;

	memset(&args, 0, sizeof(args));

	if (ASIC_IS_AVIVO(rdev)) {
		uint32_t ss_cntl;

		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
		else
			pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;

		/* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
		if (radeon_crtc->crtc_id == 0) {
			ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
			WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
		} else {
			ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
			WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
		}
	} else {
		pll_flags |= RADEON_PLL_LEGACY;

		if (mode->clock > 200000) /* range limits??? */
			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
		else
			pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			if (!ASIC_IS_AVIVO(rdev)) {
				if (encoder->encoder_type !=
				    DRM_MODE_ENCODER_DAC)
					pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
				if (encoder->encoder_type ==
				    DRM_MODE_ENCODER_LVDS)
					pll_flags |= RADEON_PLL_USE_REF_DIV;
			}
			radeon_encoder = to_radeon_encoder(encoder);
		}
	}

	if (radeon_crtc->crtc_id == 0)
		pll = &rdev->clock.p1pll;
	else
		pll = &rdev->clock.p2pll;

	radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div,
			   &ref_div, &post_div, pll_flags);

	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
			      &crev);

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			spc1_ptr = (PIXEL_CLOCK_PARAMETERS *)&args.sPCLKInput;
			spc1_ptr->usPixelClock = cpu_to_le16(sclock);
			spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
			spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
			spc1_ptr->ucFracFbDiv = frac_fb_div;
			spc1_ptr->ucPostDiv = post_div;
			spc1_ptr->ucPpll =
			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
			spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
			spc1_ptr->ucRefDivSrc = 1;
			break;
		case 2:
			spc2_ptr = (PIXEL_CLOCK_PARAMETERS_V2 *)&args.sPCLKInput;
			spc2_ptr->usPixelClock = cpu_to_le16(sclock);
			spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
			spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
			spc2_ptr->ucFracFbDiv = frac_fb_div;
			spc2_ptr->ucPostDiv = post_div;
			spc2_ptr->ucPpll =
			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
			spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
			spc2_ptr->ucRefDivSrc = 1;
			break;
		case 3:
			if (!encoder)
				return;
			spc3_ptr = (PIXEL_CLOCK_PARAMETERS_V3 *)&args.sPCLKInput;
			spc3_ptr->usPixelClock = cpu_to_le16(sclock);
			spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
			spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
			spc3_ptr->ucFracFbDiv = frac_fb_div;
			spc3_ptr->ucPostDiv = post_div;
			spc3_ptr->ucPpll =
			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
			spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
			spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
			spc3_ptr->ucEncoderMode =
			    atombios_get_encoder_mode(encoder);
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}

	printk("executing set pll\n");
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

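radeon_compute_pll() picks the divider set that the AtomBIOS table then programs. The relation being solved is, to a first approximation, pixel_clock = reference_clock * fb_div / (ref_div * post_div); the flags above only bias which divider combination gets chosen. A hedged arithmetic sketch with hypothetical divider values (the 27 MHz reference is a common but assumed figure, not taken from this patch):

#include <stdio.h>

/* Illustration of the divider relation; all clocks in kHz, matching the
 * units of mode->clock. The divider values are made up for the example. */
int main(void)
{
	const unsigned ref_freq = 27000;	/* assumed 27 MHz crystal */
	const unsigned ref_div = 9, fb_div = 130, post_div = 6;
	unsigned pclk = ref_freq * fb_div / (ref_div * post_div);
	printf("computed pixel clock: %u kHz (target 65000 for 1024x768@60)\n",
	       pclk);
	return 0;
}
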
int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
			   struct drm_framebuffer *old_fb)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_framebuffer *radeon_fb;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	uint64_t fb_location;
	uint32_t fb_format, fb_pitch_pixels;

	if (!crtc->fb)
		return -EINVAL;

	radeon_fb = to_radeon_framebuffer(crtc->fb);

	obj = radeon_fb->obj;
	obj_priv = obj->driver_private;

	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location))
		return -EINVAL;

	switch (crtc->fb->bits_per_pixel) {
	case 15:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
		    AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
		break;
	case 16:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
		    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
		break;
	case 24:
	case 32:
		fb_format =
		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
		break;
	default:
		DRM_ERROR("Unsupported screen depth %d\n",
			  crtc->fb->bits_per_pixel);
		return -EINVAL;
	}

	/* TODO tiling */
	if (radeon_crtc->crtc_id == 0)
		WREG32(AVIVO_D1VGA_CONTROL, 0);
	else
		WREG32(AVIVO_D2VGA_CONTROL, 0);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32) fb_location);
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
	       radeon_crtc->crtc_offset, (u32) fb_location);
	WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);

	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
	WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);

	fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
	WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
	WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);

	WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
	       crtc->mode.vdisplay);
	x &= ~3;
	y &= ~1;
	WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
	       (x << 16) | y);
	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);

	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
		       AVIVO_D1MODE_INTERLEAVE_EN);
	else
		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);

	if (old_fb && old_fb != crtc->fb) {
		radeon_fb = to_radeon_framebuffer(old_fb);
		radeon_gem_object_unpin(radeon_fb->obj);
	}
	return 0;
}

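/* Worked example for the pitch conversion above: a 1280-wide, 32 bpp
 * framebuffer with a 5120-byte pitch gives
 * fb_pitch_pixels = 5120 / (32 / 8) = 1280, the unit AVIVO_D1GRPH_PITCH
 * expects. (Values are illustrative, not taken from the patch.) */
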
int atombios_crtc_mode_set(struct drm_crtc *crtc,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode,
			   int x, int y, struct drm_framebuffer *old_fb)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;

	/* TODO color tiling */
	memset(&crtc_timing, 0, sizeof(crtc_timing));

	/* TODO tv */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {

	}

	crtc_timing.ucCRTC = radeon_crtc->crtc_id;
	crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
	crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
	crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
	crtc_timing.usH_SyncWidth =
	    adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;

	crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
	crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
	crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
	crtc_timing.usV_SyncWidth =
	    adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;

	if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
		crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;

	atombios_crtc_set_pll(crtc, adjusted_mode);
	atombios_crtc_set_timing(crtc, &crtc_timing);

	if (ASIC_IS_AVIVO(rdev))
		atombios_crtc_set_base(crtc, x, y, old_fb);
	else {
		if (radeon_crtc->crtc_id == 0) {
			SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
			memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));

			/* setup FP shadow regs on R4xx */
			crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
			crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
			crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
			crtc_dtd_timing.usH_Blanking_Time =
			    adjusted_mode->crtc_hblank_end -
			    adjusted_mode->crtc_hdisplay;
			crtc_dtd_timing.usV_Blanking_Time =
			    adjusted_mode->crtc_vblank_end -
			    adjusted_mode->crtc_vdisplay;
			crtc_dtd_timing.usH_SyncOffset =
			    adjusted_mode->crtc_hsync_start -
			    adjusted_mode->crtc_hdisplay;
			crtc_dtd_timing.usV_SyncOffset =
			    adjusted_mode->crtc_vsync_start -
			    adjusted_mode->crtc_vdisplay;
			crtc_dtd_timing.usH_SyncWidth =
			    adjusted_mode->crtc_hsync_end -
			    adjusted_mode->crtc_hsync_start;
			crtc_dtd_timing.usV_SyncWidth =
			    adjusted_mode->crtc_vsync_end -
			    adjusted_mode->crtc_vsync_start;
			/* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
			/* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */

			if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
				crtc_dtd_timing.susModeMiscInfo.usAccess |=
				    ATOM_VSYNC_POLARITY;

			if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
				crtc_dtd_timing.susModeMiscInfo.usAccess |=
				    ATOM_HSYNC_POLARITY;

			if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
				crtc_dtd_timing.susModeMiscInfo.usAccess |=
				    ATOM_COMPOSITESYNC;

			if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
				crtc_dtd_timing.susModeMiscInfo.usAccess |=
				    ATOM_INTERLACE;

			if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
				crtc_dtd_timing.susModeMiscInfo.usAccess |=
				    ATOM_DOUBLE_CLOCK_MODE;

			atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
		}
		radeon_crtc_set_base(crtc, x, y, old_fb);
		radeon_legacy_atom_set_surface(crtc);
	}
	return 0;
}

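/* Worked example for the DTD arithmetic above, using standard 1024x768@60
 * CRTC values (htotal 1344, hdisplay 1024, hsync_start 1048, hsync_end
 * 1184): usH_Blanking_Time = 1344 - 1024 = 320, usH_SyncOffset =
 * 1048 - 1024 = 24, usH_SyncWidth = 1184 - 1048 = 136. */
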
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
				     struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	return true;
}

static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	atombios_lock_crtc(crtc, 1);
}

static void atombios_crtc_commit(struct drm_crtc *crtc)
{
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	atombios_lock_crtc(crtc, 0);
}

static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
	.dpms = atombios_crtc_dpms,
	.mode_fixup = atombios_crtc_mode_fixup,
	.mode_set = atombios_crtc_mode_set,
	.mode_set_base = atombios_crtc_set_base,
	.prepare = atombios_crtc_prepare,
	.commit = atombios_crtc_commit,
};

void radeon_atombios_init_crtc(struct drm_device *dev,
			       struct radeon_crtc *radeon_crtc)
{
	if (radeon_crtc->crtc_id == 1)
		radeon_crtc->crtc_offset =
		    AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}

void radeon_init_disp_bw_avivo(struct drm_device *dev,
			       struct drm_display_mode *mode1,
			       uint32_t pixel_bytes1,
			       struct drm_display_mode *mode2,
			       uint32_t pixel_bytes2)
{
	struct radeon_device *rdev = dev->dev_private;
	fixed20_12 min_mem_eff;
	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
	fixed20_12 sclk_ff, mclk_ff;
	uint32_t dc_lb_memory_split, temp;

	min_mem_eff.full = rfixed_const_8(0);
	if (rdev->disp_priority == 2) {
		uint32_t mc_init_misc_lat_timer = 0;
		if (rdev->family == CHIP_RV515)
			mc_init_misc_lat_timer =
			    RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
		else if (rdev->family == CHIP_RS690)
			mc_init_misc_lat_timer =
			    RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);

		mc_init_misc_lat_timer &=
		    ~(R300_MC_DISP1R_INIT_LAT_MASK <<
		      R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &=
		    ~(R300_MC_DISP0R_INIT_LAT_MASK <<
		      R300_MC_DISP0R_INIT_LAT_SHIFT);

		if (mode2)
			mc_init_misc_lat_timer |=
			    (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |=
			    (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);

		if (rdev->family == CHIP_RV515)
			WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
				  mc_init_misc_lat_timer);
		else if (rdev->family == CHIP_RS690)
			WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
				  mc_init_misc_lat_timer);
	}

	/*
	 * determine if there is enough bandwidth for the current mode
	 */
	temp_ff.full = rfixed_const(100);
	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);

	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = rfixed_const(temp);
	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);

	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = rfixed_const(1000);
		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
		pix_clk.full = rfixed_div(pix_clk, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes1);
		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = rfixed_const(1000);
		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes2);
		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
	}

	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
			  "If you have a flickering problem, try to lower the resolution, refresh rate, or color depth\n");
		printk("peak disp bw %d, mem_bw %d\n",
		       rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
	}

	/*
	 * Line Buffer Setup
	 * There is a single line buffer shared by both display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
	 * controllers. The partitioning can either be done manually or via one of four
	 * preset allocations specified in bits 1:0:
	 * 0 - line buffer is divided in half and shared between each display controller
	 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
	 * 2 - D1 gets the whole buffer
	 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
	 * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual allocation mode.
	 * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
	 * 14:4; D2 allocation follows D1.
	 */

	/* is auto or manual better ? */
	dc_lb_memory_split =
	    RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
	dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
#if 1
	/* auto */
	if (mode1 && mode2) {
		if (mode1->hdisplay > mode2->hdisplay) {
			if (mode1->hdisplay > 2560)
				dc_lb_memory_split |=
				    AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
			else
				dc_lb_memory_split |=
				    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else if (mode2->hdisplay > mode1->hdisplay) {
			if (mode2->hdisplay > 2560)
				dc_lb_memory_split |=
				    AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
			else
				dc_lb_memory_split |=
				    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else
			dc_lb_memory_split |=
			    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	} else if (mode1) {
		dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
	} else if (mode2) {
		dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
	}
#else
	/* manual */
	dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
	dc_lb_memory_split &=
	    ~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
	      AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
	if (mode1) {
		dc_lb_memory_split |=
		    ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
		     << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
	} else if (mode2) {
		dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
	}
#endif
	WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
}
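
The fixed-point math above reduces to: available memory bandwidth = memory clock (MHz) x bus width in bytes x 2 for DDR x 0.8 efficiency, compared against the sum over both heads of pixel clock (MHz) x bytes per pixel. A back-of-envelope sketch in plain C (all clock and bus-width numbers are hypothetical; the 0.8 mirrors the min_mem_eff factor used by the driver):

#include <stdio.h>

/* Hedged, simplified version of the check in radeon_init_disp_bw_avivo();
 * values below are made up for illustration. */
int main(void)
{
	const double mclk_mhz = 400.0;		/* memory clock */
	const double bus_bytes = 128.0 / 8.0;	/* 128-bit VRAM interface */
	const int is_ddr = 1;
	double mem_bw = mclk_mhz * bus_bytes * (is_ddr ? 2 : 1) * 0.8;

	double peak_disp_bw = 162.0 * 4.0;	/* 1600x1200@60: 162 MHz, 32 bpp */

	printf("memory %.0f MB/s vs display %.0f MB/s -> %s\n",
	       mem_bw, peak_disp_bw,
	       peak_disp_bw >= mem_bw ? "may flicker" : "enough headroom");
	return 0;
}
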
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,30 +1,34 @@
-/**************************************************************************
-
-Copyright (C) 2004-2005 Nicolai Haehnle et al.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the "Software"),
-to deal in the Software without restriction, including without limitation
-on the rights to use, copy, modify, merge, publish, distribute, sub
-license, and/or sell copies of the Software, and to permit persons to whom
-the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice (including the next
-paragraph) shall be included in all copies or substantial portions of the
-Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#ifndef _R300_REG_H
-#define _R300_REG_H
+/*
+ * Copyright 2005 Nicolai Haehnle et al.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Nicolai Haehnle
+ *          Jerome Glisse
+ */
+#ifndef _R300_REG_H_
+#define _R300_REG_H_

 #define R300_MC_INIT_MISC_LAT_TIMER 0x180
 # define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
@@ -0,0 +1,223 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* r420, r423, rv410 depend on: */
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_hdp_reset(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
void r300_vram_info(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_pcie_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * r420, r423, rv410
 *
 * Some of these functions might be used by newer ASICs.
 */
void r420_gpu_init(struct radeon_device *rdev);
int r420_debugfs_pipes_info_init(struct radeon_device *rdev);


/*
 * MC
 */
int r420_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	if (r420_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}

	r420_gpu_init(rdev);
	r100_pci_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
	}

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	r100_mc_disable_clients(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	r100_mc_setup(rdev);
	return 0;
}

void r420_mc_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Global GPU functions
 */
void r420_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

void r420_pipes_init(struct radeon_device *rdev)
{
	unsigned tmp;
	unsigned gb_pipe_select;
	unsigned num_pipes;

	/* GA_ENHANCE workaround TCL deadlock issue */
	WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
	/* get max number of pipes */
	gb_pipe_select = RREG32(0x402C);
	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
	rdev->num_gb_pipes = num_pipes;
	tmp = 0;
	switch (num_pipes) {
	default:
		/* force to 1 pipe */
		num_pipes = 1;
	case 1:
		tmp = (0 << 1);
		break;
	case 2:
		tmp = (3 << 1);
		break;
	case 3:
		tmp = (6 << 1);
		break;
	case 4:
		tmp = (7 << 1);
		break;
	}
	WREG32(0x42C8, (1 << num_pipes) - 1);
	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	tmp |= (1 << 4) | (1 << 0);
	WREG32(0x4018, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       RREG32(R300_RB2D_DSTCACHE_MODE) |
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}
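
The pipe count comes straight out of GB_PIPE_SELECT (0x402C): bits 13:12 hold the pipe count minus one, exactly as the decode in r420_pipes_init() above shows. A small standalone sketch of that decode, with a made-up readback value:

#include <stdio.h>

int main(void)
{
	unsigned gb_pipe_select = 0x00003000;	/* hypothetical readback: bits 13:12 = 3 */
	unsigned num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
	printf("GB_PIPE_SELECT 0x%08x -> %u pipes\n", gb_pipe_select, num_pipes);
	return 0;
}
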
void r420_gpu_init(struct radeon_device *rdev)
{
	r100_hdp_reset(rdev);
	r420_pipes_init(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}


/*
 * r420,r423,rv410 VRAM info
 */
void r420_vram_info(struct radeon_device *rdev)
{
	r300_vram_info(rdev);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(R400_GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(R300_GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif

int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
	return 0;
#endif
}
@@ -0,0 +1,749 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __R500_REG_H__
#define __R500_REG_H__

/* pipe config regs */
#define R300_GA_POLY_MODE 0x4288
# define R300_FRONT_PTYPE_POINT (0 << 4)
# define R300_FRONT_PTYPE_LINE (1 << 4)
# define R300_FRONT_PTYPE_TRIANGE (2 << 4)
# define R300_BACK_PTYPE_POINT (0 << 7)
# define R300_BACK_PTYPE_LINE (1 << 7)
# define R300_BACK_PTYPE_TRIANGE (2 << 7)
#define R300_GA_ROUND_MODE 0x428c
# define R300_GEOMETRY_ROUND_TRUNC (0 << 0)
# define R300_GEOMETRY_ROUND_NEAREST (1 << 0)
# define R300_COLOR_ROUND_TRUNC (0 << 2)
# define R300_COLOR_ROUND_NEAREST (1 << 2)
#define R300_GB_MSPOS0 0x4010
# define R300_MS_X0_SHIFT 0
# define R300_MS_Y0_SHIFT 4
# define R300_MS_X1_SHIFT 8
# define R300_MS_Y1_SHIFT 12
# define R300_MS_X2_SHIFT 16
# define R300_MS_Y2_SHIFT 20
# define R300_MSBD0_Y_SHIFT 24
# define R300_MSBD0_X_SHIFT 28
#define R300_GB_MSPOS1 0x4014
# define R300_MS_X3_SHIFT 0
# define R300_MS_Y3_SHIFT 4
# define R300_MS_X4_SHIFT 8
# define R300_MS_Y4_SHIFT 12
# define R300_MS_X5_SHIFT 16
# define R300_MS_Y5_SHIFT 20
# define R300_MSBD1_SHIFT 24

#define R300_GA_ENHANCE 0x4274
# define R300_GA_DEADLOCK_CNTL (1 << 0)
# define R300_GA_FASTSYNC_CNTL (1 << 1)
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
# define R300_RB3D_DC_FLUSH (2 << 0)
# define R300_RB3D_DC_FREE (2 << 2)
# define R300_RB3D_DC_FINISH (1 << 4)
#define R300_RB3D_ZCACHE_CTLSTAT 0x4f18
# define R300_ZC_FLUSH (1 << 0)
# define R300_ZC_FREE (1 << 1)
# define R300_ZC_FLUSH_ALL 0x3
#define R400_GB_PIPE_SELECT 0x402c
#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
#define R500_SU_REG_DEST 0x42c8
#define R300_GB_TILE_CONFIG 0x4018
# define R300_ENABLE_TILING (1 << 0)
# define R300_PIPE_COUNT_RV350 (0 << 1)
# define R300_PIPE_COUNT_R300 (3 << 1)
# define R300_PIPE_COUNT_R420_3P (6 << 1)
# define R300_PIPE_COUNT_R420 (7 << 1)
# define R300_TILE_SIZE_8 (0 << 4)
# define R300_TILE_SIZE_16 (1 << 4)
# define R300_TILE_SIZE_32 (2 << 4)
# define R300_SUBPIXEL_1_12 (0 << 16)
# define R300_SUBPIXEL_1_16 (1 << 16)
#define R300_DST_PIPE_CONFIG 0x170c
# define R300_PIPE_AUTO_CONFIG (1 << 31)
#define R300_RB2D_DSTCACHE_MODE 0x3428
# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)

#define RADEON_CP_STAT 0x7C0
#define RADEON_RBBM_CMDFIFO_ADDR 0xE70
#define RADEON_RBBM_CMDFIFO_DATA 0xE74
#define RADEON_ISYNC_CNTL 0x1724
# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)

#define RS480_NB_MC_INDEX 0x168
# define RS480_NB_MC_IND_WR_EN (1 << 8)
#define RS480_NB_MC_DATA 0x16c

/*
 * RS690
 */
#define RS690_MCCFG_FB_LOCATION 0x100
#define RS690_MC_FB_START_MASK 0x0000FFFF
#define RS690_MC_FB_START_SHIFT 0
#define RS690_MC_FB_TOP_MASK 0xFFFF0000
#define RS690_MC_FB_TOP_SHIFT 16
#define RS690_MCCFG_AGP_LOCATION 0x101
#define RS690_MC_AGP_START_MASK 0x0000FFFF
#define RS690_MC_AGP_START_SHIFT 0
#define RS690_MC_AGP_TOP_MASK 0xFFFF0000
#define RS690_MC_AGP_TOP_SHIFT 16
#define RS690_MCCFG_AGP_BASE 0x102
#define RS690_MCCFG_AGP_BASE_2 0x103
#define RS690_MC_INIT_MISC_LAT_TIMER 0x104
#define RS690_HDP_FB_LOCATION 0x0134
#define RS690_MC_INDEX 0x78
# define RS690_MC_INDEX_MASK 0x1ff
# define RS690_MC_INDEX_WR_EN (1 << 9)
# define RS690_MC_INDEX_WR_ACK 0x7f
#define RS690_MC_DATA 0x7c
#define RS690_MC_STATUS 0x90
#define RS690_MC_STATUS_IDLE (1 << 0)
#define RS480_AGP_BASE_2 0x0164
#define RS480_MC_MISC_CNTL 0x18
# define RS480_DISABLE_GTW (1 << 1)
# define RS480_GART_INDEX_REG_EN (1 << 12)
# define RS690_BLOCK_GFX_D3_EN (1 << 14)
#define RS480_GART_FEATURE_ID 0x2b
# define RS480_HANG_EN (1 << 11)
# define RS480_TLB_ENABLE (1 << 18)
# define RS480_P2P_ENABLE (1 << 19)
# define RS480_GTW_LAC_EN (1 << 25)
# define RS480_2LEVEL_GART (0 << 30)
# define RS480_1LEVEL_GART (1 << 30)
# define RS480_PDC_EN (1 << 31)
#define RS480_GART_BASE 0x2c
#define RS480_GART_CACHE_CNTRL 0x2e
# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
# define RS480_GART_EN (1 << 0)
# define RS480_VA_SIZE_32MB (0 << 1)
# define RS480_VA_SIZE_64MB (1 << 1)
# define RS480_VA_SIZE_128MB (2 << 1)
# define RS480_VA_SIZE_256MB (3 << 1)
# define RS480_VA_SIZE_512MB (4 << 1)
# define RS480_VA_SIZE_1GB (5 << 1)
# define RS480_VA_SIZE_2GB (6 << 1)
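/* The VA_SIZE encodings above form a power-of-two ladder: field value n
 * (in bits 3:1) selects a (32 MB << n) GART aperture, e.g. 4 -> 512 MB,
 * 6 -> 2 GB. (Reading of the encoding inferred from the names; the
 * surrounding header does not spell it out.) */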
#define RS480_AGP_MODE_CNTL 0x39
# define RS480_POST_GART_Q_SIZE (1 << 18)
# define RS480_NONGART_SNOOP (1 << 19)
# define RS480_AGP_RD_BUF_SIZE (1 << 20)
# define RS480_REQ_TYPE_SNOOP_SHIFT 22
# define RS480_REQ_TYPE_SNOOP_MASK 0x3
# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)

#define RS690_AIC_CTRL_SCRATCH 0x3A
# define RS690_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)

/*
 * RS600
 */
#define RS600_MC_STATUS 0x0
#define RS600_MC_STATUS_IDLE (1 << 0)
#define RS600_MC_INDEX 0x70
# define RS600_MC_ADDR_MASK 0xffff
# define RS600_MC_IND_SEQ_RBS_0 (1 << 16)
# define RS600_MC_IND_SEQ_RBS_1 (1 << 17)
# define RS600_MC_IND_SEQ_RBS_2 (1 << 18)
# define RS600_MC_IND_SEQ_RBS_3 (1 << 19)
# define RS600_MC_IND_AIC_RBS (1 << 20)
# define RS600_MC_IND_CITF_ARB0 (1 << 21)
# define RS600_MC_IND_CITF_ARB1 (1 << 22)
# define RS600_MC_IND_WR_EN (1 << 23)
#define RS600_MC_DATA 0x74
#define RS600_MC_STATUS 0x0
# define RS600_MC_IDLE (1 << 1)
#define RS600_MC_FB_LOCATION 0x4
#define RS600_MC_FB_START_MASK 0x0000FFFF
#define RS600_MC_FB_START_SHIFT 0
#define RS600_MC_FB_TOP_MASK 0xFFFF0000
#define RS600_MC_FB_TOP_SHIFT 16
#define RS600_MC_AGP_LOCATION 0x5
#define RS600_MC_AGP_START_MASK 0x0000FFFF
#define RS600_MC_AGP_START_SHIFT 0
#define RS600_MC_AGP_TOP_MASK 0xFFFF0000
#define RS600_MC_AGP_TOP_SHIFT 16
#define RS600_MC_AGP_BASE 0x6
#define RS600_MC_AGP_BASE_2 0x7
#define RS600_MC_CNTL1 0x9
# define RS600_ENABLE_PAGE_TABLES (1 << 26)
#define RS600_MC_PT0_CNTL 0x100
# define RS600_ENABLE_PT (1 << 0)
# define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
# define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
# define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28)
# define RS600_INVALIDATE_L2_CACHE (1 << 29)
#define RS600_MC_PT0_CONTEXT0_CNTL 0x102
# define RS600_ENABLE_PAGE_TABLE (1 << 0)
# define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1)
#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112
#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114
#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c
#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c
#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c
#define RS600_MC_PT0_CLIENT0_CNTL 0x16c
# define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0)
# define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1)
# define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8)
# define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8)
# define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8)
# define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8)
# define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8)
# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10)
# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10)
# define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
# define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
# define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
# define RS600_INVALIDATE_L1_TLB (1 << 20)
/* rs600/rs690/rs740 */
# define RS600_BUS_MASTER_DIS (1 << 14)
# define RS600_MSI_REARM (1 << 20)
/* see RS400_MSI_REARM in AIC_CNTL for rs480 */


#define RV515_MC_FB_LOCATION 0x01
#define RV515_MC_FB_START_MASK 0x0000FFFF
#define RV515_MC_FB_START_SHIFT 0
#define RV515_MC_FB_TOP_MASK 0xFFFF0000
#define RV515_MC_FB_TOP_SHIFT 16
#define RV515_MC_AGP_LOCATION 0x02
#define RV515_MC_AGP_START_MASK 0x0000FFFF
#define RV515_MC_AGP_START_SHIFT 0
#define RV515_MC_AGP_TOP_MASK 0xFFFF0000
#define RV515_MC_AGP_TOP_SHIFT 16
#define RV515_MC_AGP_BASE 0x03
#define RV515_MC_AGP_BASE_2 0x04

#define R520_MC_FB_LOCATION 0x04
#define R520_MC_FB_START_MASK 0x0000FFFF
#define R520_MC_FB_START_SHIFT 0
#define R520_MC_FB_TOP_MASK 0xFFFF0000
#define R520_MC_FB_TOP_SHIFT 16
#define R520_MC_AGP_LOCATION 0x05
#define R520_MC_AGP_START_MASK 0x0000FFFF
#define R520_MC_AGP_START_SHIFT 0
#define R520_MC_AGP_TOP_MASK 0xFFFF0000
#define R520_MC_AGP_TOP_SHIFT 16
#define R520_MC_AGP_BASE 0x06
#define R520_MC_AGP_BASE_2 0x07


#define AVIVO_MC_INDEX 0x0070
#define R520_MC_STATUS 0x00
#define R520_MC_STATUS_IDLE (1<<1)
#define RV515_MC_STATUS 0x08
#define RV515_MC_STATUS_IDLE (1<<4)
#define RV515_MC_INIT_MISC_LAT_TIMER 0x09
#define AVIVO_MC_DATA 0x0074

#define R520_MC_IND_INDEX 0x70
#define R520_MC_IND_WR_EN (1 << 24)
#define R520_MC_IND_DATA 0x74

#define RV515_MC_CNTL 0x5
# define RV515_MEM_NUM_CHANNELS_MASK 0x3
#define R520_MC_CNTL0 0x8
# define R520_MEM_NUM_CHANNELS_MASK (0x3 << 24)
# define R520_MEM_NUM_CHANNELS_SHIFT 24
# define R520_MC_CHANNEL_SIZE (1 << 23)

#define AVIVO_CP_DYN_CNTL 0x000f /* PLL */
# define AVIVO_CP_FORCEON (1 << 0)
#define AVIVO_E2_DYN_CNTL 0x0011 /* PLL */
# define AVIVO_E2_FORCEON (1 << 0)
#define AVIVO_IDCT_DYN_CNTL 0x0013 /* PLL */
# define AVIVO_IDCT_FORCEON (1 << 0)

#define AVIVO_HDP_FB_LOCATION 0x134

#define AVIVO_VGA_RENDER_CONTROL 0x0300
# define AVIVO_VGA_VSTATUS_CNTL_MASK (3 << 16)
#define AVIVO_D1VGA_CONTROL 0x0330
# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
# define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
#define AVIVO_D2VGA_CONTROL 0x0338

#define AVIVO_EXT1_PPLL_REF_DIV_SRC 0x400
#define AVIVO_EXT1_PPLL_REF_DIV 0x404
#define AVIVO_EXT1_PPLL_UPDATE_LOCK 0x408
#define AVIVO_EXT1_PPLL_UPDATE_CNTL 0x40c

#define AVIVO_EXT2_PPLL_REF_DIV_SRC 0x410
#define AVIVO_EXT2_PPLL_REF_DIV 0x414
#define AVIVO_EXT2_PPLL_UPDATE_LOCK 0x418
#define AVIVO_EXT2_PPLL_UPDATE_CNTL 0x41c

#define AVIVO_EXT1_PPLL_FB_DIV 0x430
#define AVIVO_EXT2_PPLL_FB_DIV 0x434

#define AVIVO_EXT1_PPLL_POST_DIV_SRC 0x438
#define AVIVO_EXT1_PPLL_POST_DIV 0x43c

#define AVIVO_EXT2_PPLL_POST_DIV_SRC 0x440
#define AVIVO_EXT2_PPLL_POST_DIV 0x444

#define AVIVO_EXT1_PPLL_CNTL 0x448
#define AVIVO_EXT2_PPLL_CNTL 0x44c

#define AVIVO_P1PLL_CNTL 0x450
#define AVIVO_P2PLL_CNTL 0x454
#define AVIVO_P1PLL_INT_SS_CNTL 0x458
#define AVIVO_P2PLL_INT_SS_CNTL 0x45c
#define AVIVO_P1PLL_TMDSA_CNTL 0x460
#define AVIVO_P2PLL_LVTMA_CNTL 0x464

#define AVIVO_PCLK_CRTC1_CNTL 0x480
#define AVIVO_PCLK_CRTC2_CNTL 0x484

#define AVIVO_D1CRTC_H_TOTAL 0x6000
#define AVIVO_D1CRTC_H_BLANK_START_END 0x6004
#define AVIVO_D1CRTC_H_SYNC_A 0x6008
#define AVIVO_D1CRTC_H_SYNC_A_CNTL 0x600c
#define AVIVO_D1CRTC_H_SYNC_B 0x6010
#define AVIVO_D1CRTC_H_SYNC_B_CNTL 0x6014

#define AVIVO_D1CRTC_V_TOTAL 0x6020
#define AVIVO_D1CRTC_V_BLANK_START_END 0x6024
#define AVIVO_D1CRTC_V_SYNC_A 0x6028
#define AVIVO_D1CRTC_V_SYNC_A_CNTL 0x602c
#define AVIVO_D1CRTC_V_SYNC_B 0x6030
#define AVIVO_D1CRTC_V_SYNC_B_CNTL 0x6034

#define AVIVO_D1CRTC_CONTROL 0x6080
# define AVIVO_CRTC_EN (1 << 0)
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4

/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc

#define AVIVO_D1GRPH_ENABLE 0x6100
#define AVIVO_D1GRPH_CONTROL 0x6104
# define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP (0 << 0)
# define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP (1 << 0)
# define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP (2 << 0)
# define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP (3 << 0)

# define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED (0 << 8)

# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555 (0 << 8)
# define AVIVO_D1GRPH_CONTROL_16BPP_RGB565 (1 << 8)
# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444 (2 << 8)
# define AVIVO_D1GRPH_CONTROL_16BPP_AI88 (3 << 8)
# define AVIVO_D1GRPH_CONTROL_16BPP_MONO16 (4 << 8)

# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888 (0 << 8)
# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010 (1 << 8)
# define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL (2 << 8)
# define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010 (3 << 8)

# define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616 (0 << 8)

# define AVIVO_D1GRPH_SWAP_RB (1 << 16)
# define AVIVO_D1GRPH_TILED (1 << 20)
# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)

#define AVIVO_D1GRPH_LUT_SEL 0x6108
#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
#define AVIVO_D1GRPH_PITCH 0x6120
#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124
#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128
#define AVIVO_D1GRPH_X_START 0x612c
#define AVIVO_D1GRPH_Y_START 0x6130
#define AVIVO_D1GRPH_X_END 0x6134
#define AVIVO_D1GRPH_Y_END 0x6138
#define AVIVO_D1GRPH_UPDATE 0x6144
# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148

#define AVIVO_D1CUR_CONTROL 0x6400
# define AVIVO_D1CURSOR_EN (1 << 0)
# define AVIVO_D1CURSOR_MODE_SHIFT 8
# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
# define AVIVO_D1CURSOR_MODE_24BPP 2
#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
#define AVIVO_D1CUR_SIZE 0x6410
#define AVIVO_D1CUR_POSITION 0x6414
#define AVIVO_D1CUR_HOT_SPOT 0x6418
#define AVIVO_D1CUR_UPDATE 0x6424
# define AVIVO_D1CURSOR_UPDATE_LOCK (1 << 16)

#define AVIVO_DC_LUT_RW_SELECT 0x6480
#define AVIVO_DC_LUT_RW_MODE 0x6484
#define AVIVO_DC_LUT_RW_INDEX 0x6488
#define AVIVO_DC_LUT_SEQ_COLOR 0x648c
#define AVIVO_DC_LUT_PWL_DATA 0x6490
#define AVIVO_DC_LUT_30_COLOR 0x6494
#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498
#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c
#define AVIVO_DC_LUT_AUTOFILL 0x64a0

#define AVIVO_DC_LUTA_CONTROL 0x64c0
#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4
#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8
#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc
#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0
#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4
#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8

#define AVIVO_DC_LB_MEMORY_SPLIT 0x6520
# define AVIVO_DC_LB_MEMORY_SPLIT_MASK 0x3
# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT 0
# define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
# define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
# define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY 2
# define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4
# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff

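/* Worked example of the manual split mode described in
 * radeon_init_disp_bw_avivo() above: with SHIFT_MODE set, D1's end
 * address / 2 goes in bits 14:4. For a hypothetical d1_end of
 * (1280 / 2) + 64 = 704, mirroring the driver's manual branch:
 *   AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE |
 *       ((704 & AVIVO_DC_LB_DISP1_END_ADR_MASK)
 *        << AVIVO_DC_LB_DISP1_END_ADR_SHIFT)  == 0x00002c04 */
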
#define R500_DxMODE_INT_MASK 0x6540
|
||||
#define R500_D1MODE_INT_MASK (1<<0)
|
||||
#define R500_D2MODE_INT_MASK (1<<8)
|
||||
|
||||
#define AVIVO_D1MODE_DATA_FORMAT 0x6528
|
||||
# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
|
||||
#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
|
||||
#define AVIVO_D1MODE_VIEWPORT_START 0x6580
|
||||
#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
|
||||
#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
|
||||
#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM 0x658c
|
||||
|
||||
#define AVIVO_D1SCL_SCALER_ENABLE 0x6590
|
||||
#define AVIVO_D1SCL_SCALER_TAP_CONTROL 0x6594
|
||||
#define AVIVO_D1SCL_UPDATE 0x65cc
|
||||
# define AVIVO_D1SCL_UPDATE_LOCK (1 << 16)
|
||||
|
||||
/* second crtc */
|
||||
#define AVIVO_D2CRTC_H_TOTAL 0x6800
|
||||
#define AVIVO_D2CRTC_H_BLANK_START_END 0x6804
|
||||
#define AVIVO_D2CRTC_H_SYNC_A 0x6808
|
||||
#define AVIVO_D2CRTC_H_SYNC_A_CNTL 0x680c
|
||||
#define AVIVO_D2CRTC_H_SYNC_B 0x6810
|
||||
#define AVIVO_D2CRTC_H_SYNC_B_CNTL 0x6814
|
||||
|
||||
#define AVIVO_D2CRTC_V_TOTAL 0x6820
|
||||
#define AVIVO_D2CRTC_V_BLANK_START_END 0x6824
|
||||
#define AVIVO_D2CRTC_V_SYNC_A 0x6828
|
||||
#define AVIVO_D2CRTC_V_SYNC_A_CNTL 0x682c
|
||||
#define AVIVO_D2CRTC_V_SYNC_B 0x6830
|
||||
#define AVIVO_D2CRTC_V_SYNC_B_CNTL 0x6834
|
||||
|
||||
#define AVIVO_D2CRTC_CONTROL 0x6880
|
||||
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
|
||||
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
|
||||
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
|
||||
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
|
||||
|
||||
#define AVIVO_D2GRPH_ENABLE 0x6900
|
||||
#define AVIVO_D2GRPH_CONTROL 0x6904
|
||||
#define AVIVO_D2GRPH_LUT_SEL 0x6908
|
||||
#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
|
||||
#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
|
||||
#define AVIVO_D2GRPH_PITCH 0x6920
|
||||
#define AVIVO_D2GRPH_SURFACE_OFFSET_X 0x6924
|
||||
#define AVIVO_D2GRPH_SURFACE_OFFSET_Y 0x6928
|
||||
#define AVIVO_D2GRPH_X_START 0x692c
|
||||
#define AVIVO_D2GRPH_Y_START 0x6930
|
||||
#define AVIVO_D2GRPH_X_END 0x6934
|
||||
#define AVIVO_D2GRPH_Y_END 0x6938
|
||||
#define AVIVO_D2GRPH_UPDATE 0x6944
|
||||
#define AVIVO_D2GRPH_FLIP_CONTROL 0x6948
|
||||
|
||||
#define AVIVO_D2CUR_CONTROL 0x6c00
|
||||
#define AVIVO_D2CUR_SURFACE_ADDRESS 0x6c08
|
||||
#define AVIVO_D2CUR_SIZE 0x6c10
|
||||
#define AVIVO_D2CUR_POSITION 0x6c14
|
||||
|
||||
#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
|
||||
#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
|
||||
#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
|
||||
#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM 0x6d8c
|
||||
|
||||
#define AVIVO_D2SCL_SCALER_ENABLE 0x6d90
|
||||
#define AVIVO_D2SCL_SCALER_TAP_CONTROL 0x6d94
|
||||
|
||||
#define AVIVO_DDIA_BIT_DEPTH_CONTROL 0x7214
|
||||
|
||||
#define AVIVO_DACA_ENABLE 0x7800
|
||||
# define AVIVO_DAC_ENABLE (1 << 0)
|
||||
#define AVIVO_DACA_SOURCE_SELECT 0x7804
|
||||
# define AVIVO_DAC_SOURCE_CRTC1 (0 << 0)
|
||||
# define AVIVO_DAC_SOURCE_CRTC2 (1 << 0)
|
||||
# define AVIVO_DAC_SOURCE_TV (2 << 0)
|
||||
|
||||
#define AVIVO_DACA_FORCE_OUTPUT_CNTL 0x783c
|
||||
# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
|
||||
# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
|
||||
# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
|
||||
# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
|
||||
# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
|
||||
# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
|
||||
#define AVIVO_DACA_POWERDOWN 0x7850
|
||||
# define AVIVO_DACA_POWERDOWN_POWERDOWN (1 << 0)
|
||||
# define AVIVO_DACA_POWERDOWN_BLUE (1 << 8)
|
||||
# define AVIVO_DACA_POWERDOWN_GREEN (1 << 16)
|
||||
# define AVIVO_DACA_POWERDOWN_RED (1 << 24)
|
||||
|
||||
#define AVIVO_DACB_ENABLE 0x7a00
|
||||
#define AVIVO_DACB_SOURCE_SELECT 0x7a04
|
||||
#define AVIVO_DACB_FORCE_OUTPUT_CNTL 0x7a3c
|
||||
# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
|
||||
# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
|
||||
# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
|
||||
# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
|
||||
# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
|
||||
# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
|
||||
#define AVIVO_DACB_POWERDOWN 0x7a50
|
||||
# define AVIVO_DACB_POWERDOWN_POWERDOWN (1 << 0)
|
||||
# define AVIVO_DACB_POWERDOWN_BLUE (1 << 8)
|
||||
# define AVIVO_DACB_POWERDOWN_GREEN (1 << 16)
|
||||
# define AVIVO_DACB_POWERDOWN_RED
|
||||
|
||||
#define AVIVO_TMDSA_CNTL 0x7880
|
||||
# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
|
||||
# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
|
||||
# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
|
||||
# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
|
||||
# define AVIVO_TMDSA_CNTL_PIXEL_ENCODING (1 << 16)
|
||||
# define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE (1 << 24)
|
||||
# define AVIVO_TMDSA_CNTL_SWAP (1 << 28)
|
||||
#define AVIVO_TMDSA_SOURCE_SELECT 0x7884
|
||||
/* 78a8 appears to be some kind of (reasonably tolerant) clock?
|
||||
* 78d0 definitely hits the transmitter, definitely clock. */
|
||||
/* MYSTERY1 This appears to control dithering? */
|
||||
#define AVIVO_TMDSA_BIT_DEPTH_CONTROL 0x7894
|
||||
# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
|
||||
# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
|
||||
# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
|
||||
# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
|
||||
# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
|
||||
# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
|
||||
# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
|
||||
# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
|
||||
#define AVIVO_TMDSA_DCBALANCER_CONTROL 0x78d0
|
||||
# define AVIVO_TMDSA_DCBALANCER_CONTROL_EN (1 << 0)
|
||||
# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
|
||||
# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
|
||||
# define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE (1 << 24)
|
||||
#define AVIVO_TMDSA_DATA_SYNCHRONIZATION 0x78d8
|
||||
# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
|
||||
# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
|
||||
#define AVIVO_TMDSA_CLOCK_ENABLE 0x7900
|
||||
#define AVIVO_TMDSA_TRANSMITTER_ENABLE 0x7904
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE (1 << 0)
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE (1 << 8)
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
|
||||
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK (1 << 16)
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)

#define AVIVO_TMDSA_TRANSMITTER_CONTROL 0x7910
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
# define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)

#define AVIVO_LVTMA_CNTL 0x7a80
# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
# define AVIVO_LVTMA_CNTL_PIXEL_ENCODING (1 << 16)
# define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE (1 << 24)
# define AVIVO_LVTMA_CNTL_SWAP (1 << 28)
#define AVIVO_LVTMA_SOURCE_SELECT 0x7a84
#define AVIVO_LVTMA_COLOR_FORMAT 0x7a88
#define AVIVO_LVTMA_BIT_DEPTH_CONTROL 0x7a94
# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)

#define AVIVO_LVTMA_DCBALANCER_CONTROL 0x7ad0
# define AVIVO_LVTMA_DCBALANCER_CONTROL_EN (1 << 0)
# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
# define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE (1 << 24)

#define AVIVO_LVTMA_DATA_SYNCHRONIZATION 0x78d8
# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
#define R500_LVTMA_CLOCK_ENABLE 0x7b00
#define R600_LVTMA_CLOCK_ENABLE 0x7b04

#define R500_LVTMA_TRANSMITTER_ENABLE 0x7b04
#define R600_LVTMA_TRANSMITTER_ENABLE 0x7b08
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN (1 << 5)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN (1 << 9)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)

#define R500_LVTMA_TRANSMITTER_CONTROL 0x7b10
#define R600_LVTMA_TRANSMITTER_CONTROL 0x7b14
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
# define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)

#define R500_LVTMA_PWRSEQ_CNTL 0x7af0
#define R600_LVTMA_PWRSEQ_CNTL 0x7af4
# define AVIVO_LVTMA_PWRSEQ_EN (1 << 0)
# define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK (1 << 2)
# define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK (1 << 3)
# define AVIVO_LVTMA_PWRSEQ_TARGET_STATE (1 << 4)
# define AVIVO_LVTMA_SYNCEN (1 << 8)
# define AVIVO_LVTMA_SYNCEN_OVRD (1 << 9)
# define AVIVO_LVTMA_SYNCEN_POL (1 << 10)
# define AVIVO_LVTMA_DIGON (1 << 16)
# define AVIVO_LVTMA_DIGON_OVRD (1 << 17)
# define AVIVO_LVTMA_DIGON_POL (1 << 18)
# define AVIVO_LVTMA_BLON (1 << 24)
# define AVIVO_LVTMA_BLON_OVRD (1 << 25)
# define AVIVO_LVTMA_BLON_POL (1 << 26)

#define R500_LVTMA_PWRSEQ_STATE 0x7af4
#define R600_LVTMA_PWRSEQ_STATE 0x7af8
# define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R (1 << 0)
# define AVIVO_LVTMA_PWRSEQ_STATE_DIGON (1 << 1)
# define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN (1 << 2)
# define AVIVO_LVTMA_PWRSEQ_STATE_BLON (1 << 3)
# define AVIVO_LVTMA_PWRSEQ_STATE_DONE (1 << 4)
# define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT (8)

#define AVIVO_LVDS_BACKLIGHT_CNTL 0x7af8
# define AVIVO_LVDS_BACKLIGHT_CNTL_EN (1 << 0)
# define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK 0x0000ff00
# define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT 8

#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988

#define AVIVO_GPIO_0 0x7e30
#define AVIVO_GPIO_1 0x7e40
#define AVIVO_GPIO_2 0x7e50
#define AVIVO_GPIO_3 0x7e60

#define AVIVO_DC_GPIO_HPD_Y 0x7e9c

#define AVIVO_I2C_STATUS 0x7d30
# define AVIVO_I2C_STATUS_DONE (1 << 0)
# define AVIVO_I2C_STATUS_NACK (1 << 1)
# define AVIVO_I2C_STATUS_HALT (1 << 2)
# define AVIVO_I2C_STATUS_GO (1 << 3)
# define AVIVO_I2C_STATUS_MASK 0x7
/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
 * DONE? */
# define AVIVO_I2C_STATUS_CMD_RESET 0x7
# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
#define AVIVO_I2C_STOP 0x7d34
#define AVIVO_I2C_START_CNTL 0x7d38
# define AVIVO_I2C_START (1 << 8)
# define AVIVO_I2C_CONNECTOR0 (0 << 16)
# define AVIVO_I2C_CONNECTOR1 (1 << 16)
#define R520_I2C_START (1 << 0)
#define R520_I2C_STOP (1 << 1)
#define R520_I2C_RX (1 << 2)
#define R520_I2C_EN (1 << 8)
#define R520_I2C_DDC1 (0 << 16)
#define R520_I2C_DDC2 (1 << 16)
#define R520_I2C_DDC3 (2 << 16)
#define R520_I2C_DDC_MASK (3 << 16)
#define AVIVO_I2C_CONTROL2 0x7d3c
# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
#define AVIVO_I2C_CONTROL3 0x7d40
/* Reading is done 4 bytes at a time: read the bottom 8 bits from
 * 7d44, four times in a row.
 * Writing is a little more complex. First write DATA with
 * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
 * magic number, zz is, I think, the slave address, and yy is the byte
 * you want to write. */
#define AVIVO_I2C_DATA 0x7d44
#define R520_I2C_ADDR_COUNT_MASK (0x7)
#define R520_I2C_DATA_COUNT_SHIFT (8)
#define R520_I2C_DATA_COUNT_MASK (0xF00)
#define AVIVO_I2C_CNTL 0x7d50
# define AVIVO_I2C_EN (1 << 0)
# define AVIVO_I2C_RESET (1 << 8)

#endif
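The I2C comment above only hints at the transaction flow, so here is a minimal, hedged sketch of a single status-polled read using these registers. RREG32/WREG32 are the driver's MMIO accessors; the assumptions here are that GO stays set while a transaction is in flight, that connector 0 is the one wired up, and that the byte lands in the low bits of AVIVO_I2C_DATA as the comment describes — none of that is guaranteed by the header itself:

static int avivo_i2c_read_byte_sketch(struct radeon_device *rdev, u8 *out)
{
	u32 status;

	WREG32(AVIVO_I2C_START_CNTL, AVIVO_I2C_START | AVIVO_I2C_CONNECTOR0);
	/* Assumed: GO is set while the engine is busy. */
	while (RREG32(AVIVO_I2C_STATUS) & AVIVO_I2C_STATUS_GO)
		cpu_relax();
	status = RREG32(AVIVO_I2C_STATUS) & AVIVO_I2C_STATUS_MASK;
	if (!(status & AVIVO_I2C_STATUS_DONE))
		return -EIO;
	/* Per the comment above: the byte is in the bottom 8 bits. */
	*out = RREG32(AVIVO_I2C_DATA) & 0xff;
	return 0;
}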
@ -0,0 +1,234 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* r520,rv530,rv560,rv570,r580 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int rv370_pcie_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);
int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
int rv515_debugfs_ga_info_init(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * r520,rv530,rv560,rv570,r580
 *
 * Some of these functions might be used by newer ASICs.
 */
void r520_gpu_init(struct radeon_device *rdev);
int r520_mc_wait_for_idle(struct radeon_device *rdev);


/*
 * MC
 */
int r520_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	if (rv515_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}
	if (rv515_debugfs_ga_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for GA !\n");
	}

	r520_gpu_init(rdev);
	rv370_pcie_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	rs600_mc_disable_clients(rdev);
	if (r520_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC to go idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(R520_MC_FB_LOCATION, tmp);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	WREG32(0x310, rdev->mc.vram_location);
	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32_MC(R520_MC_AGP_LOCATION, tmp);
		WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
		WREG32_MC(R520_MC_AGP_BASE_2, 0);
	} else {
		WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32_MC(R520_MC_AGP_BASE, 0);
		WREG32_MC(R520_MC_AGP_BASE_2, 0);
	}
	return 0;
}

void r520_mc_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Global GPU functions
 */
void r520_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(R520_MC_STATUS);
		if (tmp & R520_MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r520_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;

	r100_hdp_reset(rdev);
	rs600_disable_vga(rdev);
	/*
	 * DST_PIPE_CONFIG		0x170C
	 * GB_TILE_CONFIG		0x4018
	 * GB_FIFO_SIZE			0x4024
	 * GB_PIPE_SELECT		0x402C
	 * GB_PIPE_SELECT2		0x4124
	 *	Z_PIPE_SHIFT		0
	 *	Z_PIPE_MASK		0x000000003
	 * GB_FIFO_SIZE2		0x4128
	 *	SC_SFIFO_SIZE_SHIFT	0
	 *	SC_SFIFO_SIZE_MASK	0x000000003
	 *	SC_MFIFO_SIZE_SHIFT	2
	 *	SC_MFIFO_SIZE_MASK	0x00000000C
	 *	FG_SFIFO_SIZE_SHIFT	4
	 *	FG_SFIFO_SIZE_MASK	0x000000030
	 *	ZB_MFIFO_SIZE_SHIFT	6
	 *	ZB_MFIFO_SIZE_MASK	0x0000000C0
	 * GA_ENHANCE			0x4274
	 * SU_REG_DEST			0x42C8
	 */
	/* workaround for RV530 */
	if (rdev->family == CHIP_RV530) {
		WREG32(0x4124, 1);
		WREG32(0x4128, 0xFF);
	}
	r420_pipes_init(rdev);
	gb_pipe_select = RREG32(0x402C);
	tmp = RREG32(0x170C);
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r520_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC to go idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}


/*
 * VRAM info
 */
static void r520_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32_MC(R520_MC_CNTL0);
	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
	case 0:
		rdev->mc.vram_width = 32;
		break;
	case 1:
		rdev->mc.vram_width = 64;
		break;
	case 2:
		rdev->mc.vram_width = 128;
		break;
	case 3:
		rdev->mc.vram_width = 256;
		break;
	default:
		rdev->mc.vram_width = 128;
		break;
	}
	if (tmp & R520_MC_CHANNEL_SIZE)
		rdev->mc.vram_width *= 2;
}

void r520_vram_info(struct radeon_device *rdev)
{
	r520_vram_get_type(rdev);
	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
@ -0,0 +1,169 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* r600,rv610,rv630,rv620,rv635,rv670 depend on: */
void rs600_mc_disable_clients(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * r600,rv610,rv630,rv620,rv635,rv670
 *
 * Some of these functions might be used by newer ASICs.
 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);


/*
 * MC
 */
int r600_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;

	r600_gpu_init(rdev);

	/* Setup the GART before changing the MC location so we can ask
	 * it to discard unmapped MC requests.
	 */
	/* FIXME: disable out of gart access */
	tmp = rdev->mc.gtt_location / 4096;
	tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
	WREG32(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
	tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
	tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
	WREG32(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);

	rs600_mc_disable_clients(rdev);
	if (r600_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC to go idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
	tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
	WREG32(R600_MC_VM_FB_LOCATION, tmp);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	tmp = REG_SET(R600_MC_AGP_TOP, tmp >> 22);
	WREG32(R600_MC_VM_AGP_TOP, tmp);
	tmp = REG_SET(R600_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
	WREG32(R600_MC_VM_AGP_BOT, tmp);
	return 0;
}
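/* Worked example of the address math above, with made-up values (not
 * from any real board): gtt_location = 256 MB and gtt_size = 512 MB
 * give aperture page numbers 0x10000 (256M / 4096) and 0x30000
 * ((256M + 512M) / 4096); vram_location = 0 and vram_size = 256 MB
 * pack FB_TOP = (256M - 1) >> 24 = 0x0F and FB_BASE = 0 through
 * REG_SET before landing in R600_MC_VM_FB_LOCATION.
 */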

void r600_mc_fini(struct radeon_device *rdev)
{
	/* FIXME: implement */
}


/*
 * Global GPU functions
 */
void r600_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

void r600_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: implement */
}


/*
 * VRAM info
 */
void r600_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;
	int chansize;

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;

	tmp = RREG32(R600_RAMCFG);
	if (tmp & R600_CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & R600_CHANSIZE) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	if (rdev->family == CHIP_R600) {
		rdev->mc.vram_width = 8 * chansize;
	} else if (rdev->family == CHIP_RV670) {
		rdev->mc.vram_width = 4 * chansize;
	} else if ((rdev->family == CHIP_RV610) ||
		   (rdev->family == CHIP_RV620)) {
		rdev->mc.vram_width = chansize;
	} else if ((rdev->family == CHIP_RV630) ||
		   (rdev->family == CHIP_RV635)) {
		rdev->mc.vram_width = 2 * chansize;
	}
}

void r600_vram_info(struct radeon_device *rdev)
{
	r600_vram_get_type(rdev);
	rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE);

	/* Could the aperture size report 0? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}

/*
 * Indirect registers accessor
 */
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(R600_PCIE_PORT_INDEX);
	r = RREG32(R600_PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(R600_PCIE_PORT_INDEX);
	WREG32(R600_PCIE_PORT_DATA, (v));
	(void)RREG32(R600_PCIE_PORT_DATA);
}
@ -0,0 +1,114 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __R600_REG_H__
#define __R600_REG_H__

#define R600_PCIE_PORT_INDEX 0x0038
#define R600_PCIE_PORT_DATA 0x003c

#define R600_MC_VM_FB_LOCATION 0x2180
#define R600_MC_FB_BASE_MASK 0x0000FFFF
#define R600_MC_FB_BASE_SHIFT 0
#define R600_MC_FB_TOP_MASK 0xFFFF0000
#define R600_MC_FB_TOP_SHIFT 16
#define R600_MC_VM_AGP_TOP 0x2184
#define R600_MC_AGP_TOP_MASK 0x0003FFFF
#define R600_MC_AGP_TOP_SHIFT 0
#define R600_MC_VM_AGP_BOT 0x2188
#define R600_MC_AGP_BOT_MASK 0x0003FFFF
#define R600_MC_AGP_BOT_SHIFT 0
#define R600_MC_VM_AGP_BASE 0x218c
#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
#define R600_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
#define R600_LOGICAL_PAGE_NUMBER_SHIFT 0
#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198

#define R700_MC_VM_FB_LOCATION 0x2024
#define R700_MC_FB_BASE_MASK 0x0000FFFF
#define R700_MC_FB_BASE_SHIFT 0
#define R700_MC_FB_TOP_MASK 0xFFFF0000
#define R700_MC_FB_TOP_SHIFT 16
#define R700_MC_VM_AGP_TOP 0x2028
#define R700_MC_AGP_TOP_MASK 0x0003FFFF
#define R700_MC_AGP_TOP_SHIFT 0
#define R700_MC_VM_AGP_BOT 0x202c
#define R700_MC_AGP_BOT_MASK 0x0003FFFF
#define R700_MC_AGP_BOT_SHIFT 0
#define R700_MC_VM_AGP_BASE 0x2030
#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
#define R700_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
#define R700_LOGICAL_PAGE_NUMBER_SHIFT 0
#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c

#define R600_RAMCFG 0x2408
# define R600_CHANSIZE (1 << 7)
# define R600_CHANSIZE_OVERRIDE (1 << 10)

#define R600_GENERAL_PWRMGT 0x618
# define R600_OPEN_DRAIN_PADS (1 << 11)

#define R600_LOWER_GPIO_ENABLE 0x710
#define R600_CTXSW_VID_LOWER_GPIO_CNTL 0x718
#define R600_HIGH_VID_LOWER_GPIO_CNTL 0x71c
#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724

#define R600_HDP_NONSURFACE_BASE 0x2c04

#define R600_BUS_CNTL 0x5420
#define R600_CONFIG_CNTL 0x5424
#define R600_CONFIG_MEMSIZE 0x5428
#define R600_CONFIG_F0_BASE 0x542C
#define R600_CONFIG_APER_SIZE 0x5430

#define R600_ROM_CNTL 0x1600
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)

#define R600_CG_SPLL_FUNC_CNTL 0x600
# define R600_SPLL_BYPASS_EN (1 << 3)
#define R600_CG_SPLL_STATUS 0x60c
# define R600_SPLL_CHG_STATUS (1 << 1)

#define R600_BIOS_0_SCRATCH 0x1724
#define R600_BIOS_1_SCRATCH 0x1728
#define R600_BIOS_2_SCRATCH 0x172c
#define R600_BIOS_3_SCRATCH 0x1730
#define R600_BIOS_4_SCRATCH 0x1734
#define R600_BIOS_5_SCRATCH 0x1738
#define R600_BIOS_6_SCRATCH 0x173c
#define R600_BIOS_7_SCRATCH 0x1740

#endif
@ -0,0 +1,793 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

#include "radeon_object.h"

/* TODO: Here are things that need to be done:
 * - surface allocator & initializer: (a bit like scratch regs) should
 *   initialize the HDP_ stuff on RS600, R600, R700 hw, and anything
 *   related to surfaces
 * - WB: write back stuff (done a bit like the scratch regs)
 * - Vblank: look at Jesse's rework and what we should do
 * - r600/r700: gart & cp
 * - cs: clean up the cs ioctl to use a bitmap & things like that
 * - power management stuff
 * - Barrier in gart code
 * - Unmappable vram?
 * - TESTING, TESTING, TESTING
 */

#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>

#include "radeon_mode.h"
#include "radeon_reg.h"


/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_connector_table;

/*
 * Copied from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4

enum radeon_family {
	CHIP_R100,
	CHIP_RV100,
	CHIP_RS100,
	CHIP_RV200,
	CHIP_RS200,
	CHIP_R200,
	CHIP_RV250,
	CHIP_RS300,
	CHIP_RV280,
	CHIP_R300,
	CHIP_R350,
	CHIP_RV350,
	CHIP_RV380,
	CHIP_R420,
	CHIP_R423,
	CHIP_RV410,
	CHIP_RS400,
	CHIP_RS480,
	CHIP_RS600,
	CHIP_RS690,
	CHIP_RS740,
	CHIP_RV515,
	CHIP_R520,
	CHIP_RV530,
	CHIP_RV560,
	CHIP_RV570,
	CHIP_R580,
	CHIP_R600,
	CHIP_RV610,
	CHIP_RV630,
	CHIP_RV620,
	CHIP_RV635,
	CHIP_RV670,
	CHIP_RS780,
	CHIP_RV770,
	CHIP_RV730,
	CHIP_RV710,
	CHIP_LAST,
};

enum radeon_chip_flags {
	RADEON_FAMILY_MASK = 0x0000ffffUL,
	RADEON_FLAGS_MASK = 0xffff0000UL,
	RADEON_IS_MOBILITY = 0x00010000UL,
	RADEON_IS_IGP = 0x00020000UL,
	RADEON_SINGLE_CRTC = 0x00040000UL,
	RADEON_IS_AGP = 0x00080000UL,
	RADEON_HAS_HIERZ = 0x00100000UL,
	RADEON_IS_PCIE = 0x00200000UL,
	RADEON_NEW_MEMMAP = 0x00400000UL,
	RADEON_IS_PCI = 0x00800000UL,
	RADEON_IS_IGPGART = 0x01000000UL,
};


/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
	CHIP_ERRATA_PLL_DELAY = 0x00000004
};


struct radeon_device;


/*
 * BIOS.
 */
bool radeon_get_bios(struct radeon_device *rdev);

/*
 * Clocks
 */

struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
};

/*
 * Fences.
 */
struct radeon_fence_driver {
	uint32_t scratch_reg;
	atomic_t seq;
	uint32_t last_seq;
	unsigned long count_timeout;
	wait_queue_head_t queue;
	rwlock_t lock;
	struct list_head created;
	struct list_head emited;
	struct list_head signaled;
};

struct radeon_fence {
	struct radeon_device *rdev;
	struct kref kref;
	struct list_head list;
	/* protected by radeon_fence_driver.lock */
	uint32_t seq;
	unsigned long timeout;
	bool emited;
	bool signaled;
};

int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev);
int radeon_fence_wait_last(struct radeon_device *rdev);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
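/* Rough usage sketch implied by the declarations above (illustrative;
 * error handling elided, and the exact ordering constraints are an
 * assumption, not something this header spells out):
 *
 *	struct radeon_fence *fence;
 *
 *	radeon_fence_create(rdev, &fence);
 *	... emit commands to the ring ...
 *	radeon_fence_emit(rdev, fence);
 *	radeon_fence_wait(fence, true);
 *	radeon_fence_unref(&fence);
 */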


/*
 * Radeon buffer.
 */
struct radeon_object;

struct radeon_object_list {
	struct list_head list;
	struct radeon_object *robj;
	uint64_t gpu_offset;
	unsigned rdomain;
	unsigned wdomain;
};

int radeon_object_init(struct radeon_device *rdev);
void radeon_object_fini(struct radeon_device *rdev);
int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr);
int radeon_object_kmap(struct radeon_object *robj, void **ptr);
void radeon_object_kunmap(struct radeon_object *robj);
void radeon_object_unref(struct radeon_object **robj);
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr);
void radeon_object_unpin(struct radeon_object *robj);
int radeon_object_wait(struct radeon_object *robj);
int radeon_object_evict_vram(struct radeon_device *rdev);
int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
void radeon_object_force_delete(struct radeon_device *rdev);
void radeon_object_list_add_object(struct radeon_object_list *lobj,
				   struct list_head *head);
int radeon_object_list_validate(struct list_head *head, void *fence);
void radeon_object_list_unvalidate(struct list_head *head);
void radeon_object_list_clean(struct list_head *head);
int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma);
unsigned long radeon_object_size(struct radeon_object *robj);


/*
 * GEM objects.
 */
struct radeon_gem {
	struct list_head objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     bool interruptible,
			     struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);


/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

struct radeon_gart_table_ram {
	volatile uint32_t *ptr;
};

struct radeon_gart_table_vram {
	struct radeon_object *robj;
	volatile uint32_t *ptr;
};

union radeon_gart_table {
	struct radeon_gart_table_ram ram;
	struct radeon_gart_table_vram vram;
};

struct radeon_gart {
	dma_addr_t table_addr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
	union radeon_gart_table table;
	struct page **pages;
	dma_addr_t *pages_addr;
	bool ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist);


/*
 * GPU MC structures, functions & helpers
 */
struct radeon_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	unsigned gtt_location;
	unsigned gtt_size;
	unsigned vram_location;
	unsigned vram_size;
	unsigned vram_width;
	int vram_mtrr;
	bool vram_is_ddr;
};

int radeon_mc_setup(struct radeon_device *rdev);


/*
 * GPU scratch registers structures, functions & helpers
 */
struct radeon_scratch {
	unsigned num_reg;
	bool free[32];
	uint32_t reg[32];
};

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);


/*
 * IRQS.
 */
struct radeon_irq {
	bool installed;
	bool sw_int;
	/* FIXME: use a define for the max CRTC count rather than hardcoding it */
	bool crtc_vblank_int[2];
};

int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);


/*
 * CP & ring.
 */
struct radeon_ib {
	struct list_head list;
	unsigned long idx;
	uint64_t gpu_addr;
	struct radeon_fence *fence;
	volatile uint32_t *ptr;
	uint32_t length_dw;
};

struct radeon_ib_pool {
	struct mutex mutex;
	struct radeon_object *robj;
	struct list_head scheduled_ibs;
	struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
	bool ready;
	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
};

struct radeon_cp {
	struct radeon_object *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr;
	unsigned wptr;
	unsigned wptr_old;
	unsigned ring_size;
	unsigned ring_free_dw;
	int count_dw;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	struct mutex mutex;
	bool ready;
};

int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
void radeon_ring_fini(struct radeon_device *rdev);
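/* Sketch of the lock/commit contract noted above (illustrative): the
 * caller reserves a dword count, writes exactly that many dwords
 * without sleeping, then commits; PACKET0() is defined further down
 * in this header.
 *
 *	if (radeon_ring_lock(rdev, 2) == 0) {
 *		radeon_ring_write(rdev, PACKET0(RADEON_CONFIG_MEMSIZE, 0));
 *		radeon_ring_write(rdev, 0);
 *		radeon_ring_unlock_commit(rdev);
 *	}
 */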


/*
 * CS.
 */
struct radeon_cs_reloc {
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	struct radeon_object_list lobj;
	uint32_t handle;
	uint32_t flags;
};

struct radeon_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	uint32_t *kdata;
};

struct radeon_cs_parser {
	struct radeon_device *rdev;
	struct drm_file *filp;
	/* chunks */
	unsigned nchunks;
	struct radeon_cs_chunk *chunks;
	uint64_t *chunks_array;
	/* IB */
	unsigned idx;
	/* relocations */
	unsigned nrelocs;
	struct radeon_cs_reloc *relocs;
	struct radeon_cs_reloc **relocs_ptr;
	struct list_head validated;
	/* indices of various chunks */
	int chunk_ib_idx;
	int chunk_relocs_idx;
	struct radeon_ib *ib;
	void *track;
};

struct radeon_cs_packet {
	unsigned idx;
	unsigned type;
	unsigned reg;
	unsigned opcode;
	int count;
	unsigned one_reg_wr;
};

typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt,
				      unsigned idx, unsigned reg);
typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt);


/*
 * AGP
 */
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


/*
 * Writeback
 */
struct radeon_wb {
	struct radeon_object *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
};


/*
 * Benchmarking
 */
void radeon_benchmark(struct radeon_device *rdev);


/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
int r100_debugfs_rbbm_init(struct radeon_device *rdev);
int r100_debugfs_cp_init(struct radeon_device *rdev);


/*
 * ASIC specific functions.
 */
struct radeon_asic {
	void (*errata)(struct radeon_device *rdev);
	void (*vram_info)(struct radeon_device *rdev);
	int (*gpu_reset)(struct radeon_device *rdev);
	int (*mc_init)(struct radeon_device *rdev);
	void (*mc_fini)(struct radeon_device *rdev);
	int (*wb_init)(struct radeon_device *rdev);
	void (*wb_fini)(struct radeon_device *rdev);
	int (*gart_enable)(struct radeon_device *rdev);
	void (*gart_disable)(struct radeon_device *rdev);
	void (*gart_tlb_flush)(struct radeon_device *rdev);
	int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
	void (*cp_fini)(struct radeon_device *rdev);
	void (*cp_disable)(struct radeon_device *rdev);
	void (*ring_start)(struct radeon_device *rdev);
	int (*irq_set)(struct radeon_device *rdev);
	int (*irq_process)(struct radeon_device *rdev);
	void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
	int (*cs_parse)(struct radeon_cs_parser *p);
	int (*copy_blit)(struct radeon_device *rdev,
			 uint64_t src_offset,
			 uint64_t dst_offset,
			 unsigned num_pages,
			 struct radeon_fence *fence);
	int (*copy_dma)(struct radeon_device *rdev,
			uint64_t src_offset,
			uint64_t dst_offset,
			unsigned num_pages,
			struct radeon_fence *fence);
	int (*copy)(struct radeon_device *rdev,
		    uint64_t src_offset,
		    uint64_t dst_offset,
		    unsigned num_pages,
		    struct radeon_fence *fence);
	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
	void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
	void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
	void (*set_clock_gating)(struct radeon_device *rdev, int enable);
};


/*
 * IOCTL.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);


/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device *, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device *, uint32_t, uint32_t);

struct radeon_device {
	struct drm_device *ddev;
	struct pci_dev *pdev;
	/* ASIC */
	enum radeon_family family;
	unsigned long flags;
	int usec_timeout;
	enum radeon_pll_errata pll_errata;
	int num_gb_pipes;
	int disp_priority;
	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	uint16_t bios_header_start;
	struct radeon_object *stollen_vga_memory;
	struct fb_info *fbdev_info;
	struct radeon_object *fbdev_robj;
	struct radeon_framebuffer *fbdev_rfb;
	/* Register mmio */
	unsigned long rmmio_base;
	unsigned long rmmio_size;
	void *rmmio;
	radeon_rreg_t mm_rreg;
	radeon_wreg_t mm_wreg;
	radeon_rreg_t mc_rreg;
	radeon_wreg_t mc_wreg;
	radeon_rreg_t pll_rreg;
	radeon_wreg_t pll_wreg;
	radeon_rreg_t pcie_rreg;
	radeon_wreg_t pcie_wreg;
	radeon_rreg_t pciep_rreg;
	radeon_wreg_t pciep_wreg;
	struct radeon_clock clock;
	struct radeon_mc mc;
	struct radeon_gart gart;
	struct radeon_mode_info mode_info;
	struct radeon_scratch scratch;
	struct radeon_mman mman;
	struct radeon_fence_driver fence_drv;
	struct radeon_cp cp;
	struct radeon_ib_pool ib_pool;
	struct radeon_irq irq;
	struct radeon_asic *asic;
	struct radeon_gem gem;
	struct mutex cs_mutex;
	struct radeon_wb wb;
	bool gpu_lockup;
	bool shutdown;
	bool suspend;
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);


/*
 * Registers read & write functions.
 */
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) rdev->mm_rreg(rdev, (reg))
#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v))
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
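/* Worked example (illustrative), using R600_MC_FB_TOP_SHIFT 16 and
 * R600_MC_FB_TOP_MASK 0xFFFF0000 from r600_reg.h:
 *   REG_SET(R600_MC_FB_TOP, 0x00FF)      -> 0x00FF0000
 *   REG_GET(R600_MC_FB_TOP, 0x00FF0000)  -> 0x00FF
 */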
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask)		\
	do {					\
		uint32_t tmp_ = RREG32(reg);	\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32(reg, tmp_);		\
	} while (0)
#define WREG32_PLL_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32_PLL(reg, tmp_);			\
	} while (0)

void r100_pll_errata_after_index(struct radeon_device *rdev);


/*
 * ASICs helpers.
 */
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
			     (rdev->family == CHIP_RV200) || \
			     (rdev->family == CHIP_RS100) || \
			     (rdev->family == CHIP_RS200) || \
			     (rdev->family == CHIP_RV250) || \
			     (rdev->family == CHIP_RV280) || \
			     (rdev->family == CHIP_RS300))
#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
			    (rdev->family == CHIP_RV350) || \
			    (rdev->family == CHIP_R350) || \
			    (rdev->family == CHIP_RV380) || \
			    (rdev->family == CHIP_R420) || \
			    (rdev->family == CHIP_R423) || \
			    (rdev->family == CHIP_RV410) || \
			    (rdev->family == CHIP_RS400) || \
			    (rdev->family == CHIP_RS480))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))


/*
 * BIOS helpers.
 */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
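/* These compose little-endian values out of the BIOS image; e.g. with
 * bios[i] = 0x34 and bios[i+1] = 0x12, RBIOS16(i) evaluates to 0x1234.
 */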

int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);


/*
 * RING helpers.
 */
#define CP_PACKET0 0x00000000
#define PACKET0_BASE_INDEX_SHIFT 0
#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
#define PACKET0_COUNT_SHIFT 16
#define PACKET0_COUNT_MASK (0x3fff << 16)
#define CP_PACKET1 0x40000000
#define CP_PACKET2 0x80000000
#define PACKET2_PAD_SHIFT 0
#define PACKET2_PAD_MASK (0x3fffffff << 0)
#define CP_PACKET3 0xC0000000
#define PACKET3_IT_OPCODE_SHIFT 8
#define PACKET3_IT_OPCODE_MASK (0xff << 8)
#define PACKET3_COUNT_SHIFT 16
#define PACKET3_COUNT_MASK (0x3fff << 16)
/* PACKET3 op code */
#define PACKET3_NOP 0x10
#define PACKET3_3D_DRAW_VBUF 0x28
#define PACKET3_3D_DRAW_IMMD 0x29
#define PACKET3_3D_DRAW_INDX 0x2A
#define PACKET3_3D_LOAD_VBPNTR 0x2F
#define PACKET3_INDX_BUFFER 0x33
#define PACKET3_3D_DRAW_VBUF_2 0x34
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
#define PACKET3_BITBLT_MULTI 0x9B

#define PACKET0(reg, n) (CP_PACKET0 |				\
			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
			 REG_SET(PACKET0_COUNT, (n)))
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
#define PACKET3(op, n) (CP_PACKET3 |				\
			REG_SET(PACKET3_IT_OPCODE, (op)) |	\
			REG_SET(PACKET3_COUNT, (n)))

#define PACKET_TYPE0 0
#define PACKET_TYPE1 1
#define PACKET_TYPE2 2
#define PACKET_TYPE3 3

#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
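/* Round-trip example (illustrative): PACKET0(0x170C, 1) builds a
 * type-0 header covering two register writes starting at 0x170C;
 * CP_PACKET_GET_TYPE() on that header gives PACKET_TYPE0,
 * CP_PACKET0_GET_REG() gives back 0x170C, and CP_PACKET_GET_COUNT()
 * gives 1.
 */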

static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (rdev->cp.count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	rdev->cp.ring[rdev->cp.wptr++] = v;
	rdev->cp.wptr &= rdev->cp.ptr_mask;
	rdev->cp.count_dw--;
	rdev->cp.ring_free_dw--;
}


/*
 * ASICs macro.
 */
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_init(rdev, rsize) (rdev)->asic->cp_init((rdev), (rsize))
#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))

#endif
@ -0,0 +1,249 @@
/*
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Dave Airlie
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"
#include "radeon_drm.h"

#if __OS_HAS_AGP
|
||||
|
||||
struct radeon_agpmode_quirk {
|
||||
u32 hostbridge_vendor;
|
||||
u32 hostbridge_device;
|
||||
u32 chip_vendor;
|
||||
u32 chip_device;
|
||||
u32 subsys_vendor;
|
||||
u32 subsys_device;
|
||||
u32 default_mode;
|
||||
};
|
||||
|
||||
static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
|
||||
/* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
|
||||
{ PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
|
||||
/* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
|
||||
{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
|
||||
	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
		0x148c, 0x2073, 4},
	/* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
		PCI_VENDOR_ID_IBM, 0x052f, 1},
	/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
		PCI_VENDOR_ID_IBM, 0x0550, 1},
	/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
		PCI_VENDOR_ID_IBM, 0x0530, 1},
	/* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
		PCI_VENDOR_ID_IBM, 0x054f, 2},
	/* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
		PCI_VENDOR_ID_SONY, 0x816b, 2},
	/* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
		PCI_VENDOR_ID_SONY, 0x8195, 8},
	/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360) */
	{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
		PCI_VENDOR_ID_DELL, 0x00e3, 2},
	/* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
		PCI_VENDOR_ID_DELL, 0x0149, 1},
	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
		0x1025, 0x0061, 1},
	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
		0x1025, 0x0064, 1},
	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
		PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
	/* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
		0x10cf, 0x127f, 1},
	/* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
	{ 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
		0x1787, 0x5960, 4},
	/* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
	{ PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
		0x17af, 0x2020, 4},
	/* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
	{ PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
		PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
	/* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
	{ PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
		PCI_VENDOR_ID_ATI, 0x013a, 2},
	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
		PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
		PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
	/* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
		0x174b, 0x7149, 4},
	/* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
		0x1462, 0x0380, 4},
	/* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
		0x148c, 0x2073, 4},
	/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
	{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
		PCI_VENDOR_ID_SONY, 0x8175, 1},
	/* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
	{ PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
		PCI_VENDOR_ID_ATI, 0x0152, 2},
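	/* All-zero terminator: radeon_agp_init() stops scanning the quirk list here. */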
	{ 0, 0, 0, 0, 0, 0, 0 },
};
#endif

int radeon_agp_init(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
	struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
	struct drm_agp_mode mode;
	struct drm_agp_info info;
	uint32_t agp_status;
	int default_mode;
	bool is_v3;
	int ret;

	/* Acquire AGP. */
	if (!rdev->ddev->agp->acquired) {
		ret = drm_agp_acquire(rdev->ddev);
		if (ret) {
			DRM_ERROR("Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	ret = drm_agp_info(rdev->ddev, &info);
	if (ret) {
		DRM_ERROR("Unable to get AGP info: %d\n", ret);
		return ret;
	}
	mode.mode = info.mode;
	agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);

	if (is_v3) {
		default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
	} else {
		if (agp_status & RADEON_AGP_4X_MODE) {
			default_mode = 4;
		} else if (agp_status & RADEON_AGP_2X_MODE) {
			default_mode = 2;
		} else {
			default_mode = 1;
		}
	}

	/* Apply AGPMode Quirks */
	while (p && p->chip_device != 0) {
		if (info.id_vendor == p->hostbridge_vendor &&
		    info.id_device == p->hostbridge_device &&
		    rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			default_mode = p->default_mode;
		}
		++p;
	}

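	/*
	 * A user-requested mode must be a power of two and within the range
	 * the bridge supports (4/8 for AGPv3, 1/2/4 otherwise); anything
	 * else falls back to the detected default.
	 */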
	if (radeon_agpmode > 0) {
		if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
		    (radeon_agpmode > (is_v3 ? 8 : 4)) ||
		    (radeon_agpmode & (radeon_agpmode - 1))) {
			DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
				  radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
				  default_mode);
			radeon_agpmode = default_mode;
		} else {
			DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
		}
	} else {
		radeon_agpmode = default_mode;
	}

	mode.mode &= ~RADEON_AGP_MODE_MASK;
	if (is_v3) {
		switch (radeon_agpmode) {
		case 8:
			mode.mode |= RADEON_AGPv3_8X_MODE;
			break;
		case 4:
		default:
			mode.mode |= RADEON_AGPv3_4X_MODE;
			break;
		}
	} else {
		switch (radeon_agpmode) {
		case 4:
			mode.mode |= RADEON_AGP_4X_MODE;
			break;
		case 2:
			mode.mode |= RADEON_AGP_2X_MODE;
			break;
		case 1:
		default:
			mode.mode |= RADEON_AGP_1X_MODE;
			break;
		}
	}

	mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
	ret = drm_agp_enable(rdev->ddev, mode);
	if (ret) {
		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
		return ret;
	}

	rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
	rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;

	/* workaround some hw issues */
	if (rdev->family < CHIP_R200) {
		WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
	}
	return 0;
#else
	return 0;
#endif
}

void radeon_agp_fini(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
			drm_agp_release(rdev->ddev);
		}
	}
#endif
}

@ -0,0 +1,405 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_ASIC_H__
#define __RADEON_ASIC_H__

/*
 * common functions
 */
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);

void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);

/*
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 */
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_errata(struct radeon_device *rdev);
void r100_vram_info(struct radeon_device *rdev);
int r100_gpu_reset(struct radeon_device *rdev);
int r100_mc_init(struct radeon_device *rdev);
void r100_mc_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
void r100_cp_fini(struct radeon_device *rdev);
void r100_cp_disable(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence);

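/*
 * Each radeon_asic below is a per-family dispatch table: the rest of the
 * driver reaches the hardware only through these hooks, and a NULL entry
 * means that operation is unavailable on that family.
 */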
static struct radeon_asic r100_asic = {
	.errata = &r100_errata,
	.vram_info = &r100_vram_info,
	.gpu_reset = &r100_gpu_reset,
	.mc_init = &r100_mc_init,
	.mc_fini = &r100_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r100_gart_enable,
	.gart_disable = &r100_pci_gart_disable,
	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
	.gart_set_page = &r100_pci_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r100_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r100_fence_ring_emit,
	.cs_parse = &r100_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = NULL,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_legacy_set_engine_clock,
	.set_memory_clock = NULL,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
};

/*
 * r300,r350,rv350,rv380
 */
void r300_errata(struct radeon_device *rdev);
void r300_vram_info(struct radeon_device *rdev);
int r300_gpu_reset(struct radeon_device *rdev);
int r300_mc_init(struct radeon_device *rdev);
void r300_mc_fini(struct radeon_device *rdev);
void r300_ring_start(struct radeon_device *rdev);
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence);
int r300_cs_parse(struct radeon_cs_parser *p);
int r300_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence);
static struct radeon_asic r300_asic = {
	.errata = &r300_errata,
	.vram_info = &r300_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &r300_mc_init,
	.mc_fini = &r300_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r300_gart_enable,
	.gart_disable = &r100_pci_gart_disable,
	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
	.gart_set_page = &r100_pci_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_legacy_set_engine_clock,
	.set_memory_clock = NULL,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
};

/*
 * r420,r423,rv410
 */
void r420_errata(struct radeon_device *rdev);
void r420_vram_info(struct radeon_device *rdev);
int r420_mc_init(struct radeon_device *rdev);
void r420_mc_fini(struct radeon_device *rdev);
static struct radeon_asic r420_asic = {
	.errata = &r420_errata,
	.vram_info = &r420_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &r420_mc_init,
	.mc_fini = &r420_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r300_gart_enable,
	.gart_disable = &rv370_pcie_gart_disable,
	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
	.gart_set_page = &rv370_pcie_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};

/*
 * rs400,rs480
 */
void rs400_errata(struct radeon_device *rdev);
void rs400_vram_info(struct radeon_device *rdev);
int rs400_mc_init(struct radeon_device *rdev);
void rs400_mc_fini(struct radeon_device *rdev);
int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
static struct radeon_asic rs400_asic = {
	.errata = &rs400_errata,
	.vram_info = &rs400_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &rs400_mc_init,
	.mc_fini = &rs400_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &rs400_gart_enable,
	.gart_disable = &rs400_gart_disable,
	.gart_tlb_flush = &rs400_gart_tlb_flush,
	.gart_set_page = &rs400_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_legacy_set_engine_clock,
	.set_memory_clock = NULL,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
};

/*
 * rs600.
 */
void rs600_errata(struct radeon_device *rdev);
void rs600_vram_info(struct radeon_device *rdev);
int rs600_mc_init(struct radeon_device *rdev);
void rs600_mc_fini(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_gart_enable(struct radeon_device *rdev);
void rs600_gart_disable(struct radeon_device *rdev);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
static struct radeon_asic rs600_asic = {
	.errata = &rs600_errata,
	.vram_info = &rs600_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &rs600_mc_init,
	.mc_fini = &rs600_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &rs600_gart_enable,
	.gart_disable = &rs600_gart_disable,
	.gart_tlb_flush = &rs600_gart_tlb_flush,
	.gart_set_page = &rs600_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &rs600_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};

/*
 * rs690,rs740
 */
void rs690_errata(struct radeon_device *rdev);
void rs690_vram_info(struct radeon_device *rdev);
int rs690_mc_init(struct radeon_device *rdev);
void rs690_mc_fini(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
static struct radeon_asic rs690_asic = {
	.errata = &rs690_errata,
	.vram_info = &rs690_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &rs690_mc_init,
	.mc_fini = &rs690_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &rs400_gart_enable,
	.gart_disable = &rs400_gart_disable,
	.gart_tlb_flush = &rs400_gart_tlb_flush,
	.gart_set_page = &rs400_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &rs600_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r300_copy_dma,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};

/*
 * rv515
 */
void rv515_errata(struct radeon_device *rdev);
void rv515_vram_info(struct radeon_device *rdev);
int rv515_gpu_reset(struct radeon_device *rdev);
int rv515_mc_init(struct radeon_device *rdev);
void rv515_mc_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
static struct radeon_asic rv515_asic = {
	.errata = &rv515_errata,
	.vram_info = &rv515_vram_info,
	.gpu_reset = &rv515_gpu_reset,
	.mc_init = &rv515_mc_init,
	.mc_fini = &rv515_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r300_gart_enable,
	.gart_disable = &rv370_pcie_gart_disable,
	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
	.gart_set_page = &rv370_pcie_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &rv515_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r100_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};

/*
 * r520,rv530,rv560,rv570,r580
 */
void r520_errata(struct radeon_device *rdev);
void r520_vram_info(struct radeon_device *rdev);
int r520_mc_init(struct radeon_device *rdev);
void r520_mc_fini(struct radeon_device *rdev);
static struct radeon_asic r520_asic = {
	.errata = &r520_errata,
	.vram_info = &r520_vram_info,
	.gpu_reset = &rv515_gpu_reset,
	.mc_init = &r520_mc_init,
	.mc_fini = &r520_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r300_gart_enable,
	.gart_disable = &rv370_pcie_gart_disable,
	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
	.gart_set_page = &rv370_pcie_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &rv515_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r100_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};

/*
 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rv770,rv730,rv710
 */
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);

#endif
File diff suppressed because it is too large
@ -0,0 +1,133 @@
/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
			   unsigned sdomain, unsigned ddomain)
{
	struct radeon_object *dobj = NULL;
	struct radeon_object *sobj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t saddr, daddr;
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	unsigned long time;
	unsigned i, n, size;
	int r;

	size = bsize;
	n = 1024;
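	/* Move the same buffer n times so jiffies-based timing has enough resolution. */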
	r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
	if (r) {
		goto out_cleanup;
	}
	r = radeon_object_pin(sobj, sdomain, &saddr);
	if (r) {
		goto out_cleanup;
	}
	r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
	if (r) {
		goto out_cleanup;
	}
	r = radeon_object_pin(dobj, ddomain, &daddr);
	if (r) {
		goto out_cleanup;
	}
	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		r = radeon_fence_create(rdev, &fence);
		if (r) {
			goto out_cleanup;
		}
		r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence);
		if (r) {
			goto out_cleanup;
		}
		r = radeon_fence_wait(fence, false);
		if (r) {
			goto out_cleanup;
		}
		radeon_fence_unref(&fence);
	}
	end_jiffies = jiffies;
	time = end_jiffies - start_jiffies;
	time = jiffies_to_msecs(time);
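	/* (n * size) >> 10 is the total KiB moved; dividing by ms gives KiB/ms. */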
	if (time > 0) {
		i = ((n * size) >> 10) / time;
		printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
		       " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
		       sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
	}
	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		r = radeon_fence_create(rdev, &fence);
		if (r) {
			goto out_cleanup;
		}
		r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence);
		if (r) {
			goto out_cleanup;
		}
		r = radeon_fence_wait(fence, false);
		if (r) {
			goto out_cleanup;
		}
		radeon_fence_unref(&fence);
	}
	end_jiffies = jiffies;
	time = end_jiffies - start_jiffies;
	time = jiffies_to_msecs(time);
	if (time > 0) {
		i = ((n * size) >> 10) / time;
		printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
		       " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
		       sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
	}
out_cleanup:
	if (sobj) {
		radeon_object_unpin(sobj);
		radeon_object_unref(&sobj);
	}
	if (dobj) {
		radeon_object_unpin(dobj);
		radeon_object_unref(&dobj);
	}
	if (fence) {
		radeon_fence_unref(&fence);
	}
	if (r) {
		printk(KERN_WARNING "Error while benchmarking BO move.\n");
	}
}

void radeon_benchmark(struct radeon_device *rdev)
{
	radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
			      RADEON_GEM_DOMAIN_VRAM);
	radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
			      RADEON_GEM_DOMAIN_GTT);
}

@ -0,0 +1,390 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/*
 * BIOS.
 */
static bool radeon_read_bios(struct radeon_device *rdev)
{
	uint8_t __iomem *bios;
	size_t size;

	rdev->bios = NULL;
	bios = pci_map_rom(rdev->pdev, &size);
	if (!bios) {
		return false;
	}

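	/* Every PCI option ROM starts with the 0x55 0xAA signature. */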
	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		pci_unmap_rom(rdev->pdev, bios);
		return false;
	}
	rdev->bios = kmalloc(size, GFP_KERNEL);
	if (rdev->bios == NULL) {
		pci_unmap_rom(rdev->pdev, bios);
		return false;
	}
	memcpy(rdev->bios, bios, size);
	pci_unmap_rom(rdev->pdev, bios);
	return true;
}

static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
	uint32_t viph_control;
	uint32_t bus_cntl;
	uint32_t d1vga_control;
	uint32_t d2vga_control;
	uint32_t vga_render_control;
	uint32_t rom_cntl;
	uint32_t cg_spll_func_cntl = 0;
	uint32_t cg_spll_status;
	bool r;

	viph_control = RREG32(RADEON_VIPH_CONTROL);
	bus_cntl = RREG32(RADEON_BUS_CNTL);
	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
	rom_cntl = RREG32(R600_ROM_CNTL);

	/* disable VIP */
	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
	/* enable the rom */
	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
	/* Disable VGA mode */
	WREG32(AVIVO_D1VGA_CONTROL,
	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
				  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
	WREG32(AVIVO_D2VGA_CONTROL,
	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
				  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
	WREG32(AVIVO_VGA_RENDER_CONTROL,
	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));

	if (rdev->family == CHIP_RV730) {
		cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);

		/* enable bypass mode */
		WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
						R600_SPLL_BYPASS_EN));

		/* wait for SPLL_CHG_STATUS to change to 1 */
		cg_spll_status = 0;
		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);

		WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
	} else
		WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));

	r = radeon_read_bios(rdev);

	/* restore regs */
	if (rdev->family == CHIP_RV730) {
		WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);

		/* wait for SPLL_CHG_STATUS to change to 1 */
		cg_spll_status = 0;
		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
	}
	WREG32(RADEON_VIPH_CONTROL, viph_control);
	WREG32(RADEON_BUS_CNTL, bus_cntl);
	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
	WREG32(R600_ROM_CNTL, rom_cntl);
	return r;
}

static bool r600_read_disabled_bios(struct radeon_device *rdev)
{
	uint32_t viph_control;
	uint32_t bus_cntl;
	uint32_t d1vga_control;
	uint32_t d2vga_control;
	uint32_t vga_render_control;
	uint32_t rom_cntl;
	uint32_t general_pwrmgt;
	uint32_t low_vid_lower_gpio_cntl;
	uint32_t medium_vid_lower_gpio_cntl;
	uint32_t high_vid_lower_gpio_cntl;
	uint32_t ctxsw_vid_lower_gpio_cntl;
	uint32_t lower_gpio_enable;
	bool r;

	viph_control = RREG32(RADEON_VIPH_CONTROL);
	bus_cntl = RREG32(RADEON_BUS_CNTL);
	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
	rom_cntl = RREG32(R600_ROM_CNTL);
	general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
	low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
	medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
	high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
	ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
	lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);

	/* disable VIP */
	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
	/* enable the rom */
	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
	/* Disable VGA mode */
	WREG32(AVIVO_D1VGA_CONTROL,
	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
				  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
	WREG32(AVIVO_D2VGA_CONTROL,
	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
				  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
	WREG32(AVIVO_VGA_RENDER_CONTROL,
	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));

	WREG32(R600_ROM_CNTL,
	       ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
		(1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
		R600_SCK_OVERWRITE));

	WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
	       (low_vid_lower_gpio_cntl & ~0x400));
	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
	       (medium_vid_lower_gpio_cntl & ~0x400));
	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
	       (high_vid_lower_gpio_cntl & ~0x400));
	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
	       (ctxsw_vid_lower_gpio_cntl & ~0x400));
	WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));

	r = radeon_read_bios(rdev);

	/* restore regs */
	WREG32(RADEON_VIPH_CONTROL, viph_control);
	WREG32(RADEON_BUS_CNTL, bus_cntl);
	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
	WREG32(R600_ROM_CNTL, rom_cntl);
	WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
	WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
	return r;
}

static bool avivo_read_disabled_bios(struct radeon_device *rdev)
{
	uint32_t seprom_cntl1;
	uint32_t viph_control;
	uint32_t bus_cntl;
	uint32_t d1vga_control;
	uint32_t d2vga_control;
	uint32_t vga_render_control;
	uint32_t gpiopad_a;
	uint32_t gpiopad_en;
	uint32_t gpiopad_mask;
	bool r;

	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
	viph_control = RREG32(RADEON_VIPH_CONTROL);
	bus_cntl = RREG32(RADEON_BUS_CNTL);
	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
	gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
	gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);

	WREG32(RADEON_SEPROM_CNTL1,
	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
	WREG32(RADEON_GPIOPAD_A, 0);
	WREG32(RADEON_GPIOPAD_EN, 0);
	WREG32(RADEON_GPIOPAD_MASK, 0);

	/* disable VIP */
	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));

	/* enable the rom */
	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));

	/* Disable VGA mode */
	WREG32(AVIVO_D1VGA_CONTROL,
	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
				  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
	WREG32(AVIVO_D2VGA_CONTROL,
	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
				  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
	WREG32(AVIVO_VGA_RENDER_CONTROL,
	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));

	r = radeon_read_bios(rdev);

	/* restore regs */
	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
	WREG32(RADEON_VIPH_CONTROL, viph_control);
	WREG32(RADEON_BUS_CNTL, bus_cntl);
	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
	WREG32(RADEON_GPIOPAD_A, gpiopad_a);
	WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
	WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
	return r;
}

static bool legacy_read_disabled_bios(struct radeon_device *rdev)
{
	uint32_t seprom_cntl1;
	uint32_t viph_control;
	uint32_t bus_cntl;
	uint32_t crtc_gen_cntl;
	uint32_t crtc2_gen_cntl;
	uint32_t crtc_ext_cntl;
	uint32_t fp2_gen_cntl;
	bool r;

	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
	viph_control = RREG32(RADEON_VIPH_CONTROL);
	bus_cntl = RREG32(RADEON_BUS_CNTL);
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	crtc2_gen_cntl = 0;
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	fp2_gen_cntl = 0;

	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
	}

	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	}

	WREG32(RADEON_SEPROM_CNTL1,
	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
		(0xc << RADEON_SCK_PRESCALE_SHIFT)));

	/* disable VIP */
	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));

	/* enable the rom */
	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));

	/* Turn off mem requests and CRTC for both controllers */
	WREG32(RADEON_CRTC_GEN_CNTL,
	       ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
		(RADEON_CRTC_DISP_REQ_EN_B |
		 RADEON_CRTC_EXT_DISP_EN)));
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(RADEON_CRTC2_GEN_CNTL,
		       ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
			RADEON_CRTC2_DISP_REQ_EN_B));
	}
	/* Turn off CRTC */
	WREG32(RADEON_CRTC_EXT_CNTL,
	       ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
		(RADEON_CRTC_SYNC_TRISTAT |
		 RADEON_CRTC_DISPLAY_DIS)));

	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
	}

	r = radeon_read_bios(rdev);

	/* restore regs */
	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
	WREG32(RADEON_VIPH_CONTROL, viph_control);
	WREG32(RADEON_BUS_CNTL, bus_cntl);
	WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
	}
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
	}
	return r;
}

static bool radeon_read_disabled_bios(struct radeon_device *rdev)
{
	if (rdev->family >= CHIP_RV770)
		return r700_read_disabled_bios(rdev);
	else if (rdev->family >= CHIP_R600)
		return r600_read_disabled_bios(rdev);
	else if (rdev->family >= CHIP_RS600)
		return avivo_read_disabled_bios(rdev);
	else
		return legacy_read_disabled_bios(rdev);
}

bool radeon_get_bios(struct radeon_device *rdev)
{
	bool r;
	uint16_t tmp;

	r = radeon_read_bios(rdev);
	if (r == false) {
		r = radeon_read_disabled_bios(rdev);
	}
	if (r == false || rdev->bios == NULL) {
		DRM_ERROR("Unable to locate a BIOS ROM\n");
		rdev->bios = NULL;
		return false;
	}
	if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
		goto free_bios;
	}

	rdev->bios_header_start = RBIOS16(0x48);
	if (!rdev->bios_header_start) {
		goto free_bios;
	}
	tmp = rdev->bios_header_start + 4;
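	/* "MOTA" is "ATOM" with the byte order reversed. */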
	if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
	    !memcmp(rdev->bios + tmp, "MOTA", 4)) {
		rdev->is_atom_bios = true;
	} else {
		rdev->is_atom_bios = false;
	}

	DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
	return true;
free_bios:
	kfree(rdev->bios);
	rdev->bios = NULL;
	return false;
}

@ -0,0 +1,833 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/* Engine clock, in units of 10 kHz */
static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
{
	struct radeon_pll *spll = &rdev->clock.spll;
	uint32_t fb_div, ref_div, post_div, sclk;

	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
	fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
	fb_div <<= 1;
	fb_div *= spll->reference_freq;

	ref_div =
	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
	sclk = fb_div / ref_div;

	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
	if (post_div == 2)
		sclk >>= 1;
	else if (post_div == 3)
		sclk >>= 2;
	else if (post_div == 4)
		sclk >>= 4;

	return sclk;
}
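/*
 * The readback above computes sclk = (2 * fb_div * reference_freq) / ref_div
 * and then applies the post divider selected by SCLK_SRC_SEL; the memory
 * clock readback below follows the same pattern with the MPLL fields.
 */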

/* Memory clock, in units of 10 kHz */
static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
{
	struct radeon_pll *mpll = &rdev->clock.mpll;
	uint32_t fb_div, ref_div, post_div, mclk;

	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
	fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
	fb_div <<= 1;
	fb_div *= mpll->reference_freq;

	ref_div =
	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
	mclk = fb_div / ref_div;

	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
	if (post_div == 2)
		mclk >>= 1;
	else if (post_div == 3)
		mclk >>= 2;
	else if (post_div == 4)
		mclk >>= 4;

	return mclk;
}

void radeon_get_clock_info(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_pll *p1pll = &rdev->clock.p1pll;
	struct radeon_pll *p2pll = &rdev->clock.p2pll;
	struct radeon_pll *spll = &rdev->clock.spll;
	struct radeon_pll *mpll = &rdev->clock.mpll;
	int ret;

	if (rdev->is_atom_bios)
		ret = radeon_atom_get_clock_info(dev);
	else
		ret = radeon_combios_get_clock_info(dev);

	if (ret) {
		if (p1pll->reference_div < 2)
			p1pll->reference_div = 12;
		if (p2pll->reference_div < 2)
			p2pll->reference_div = 12;
		if (spll->reference_div < 2)
			spll->reference_div =
			    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
			    RADEON_M_SPLL_REF_DIV_MASK;
		if (mpll->reference_div < 2)
			mpll->reference_div = spll->reference_div;
	} else {
		if (ASIC_IS_AVIVO(rdev)) {
			/* TODO FALLBACK */
		} else {
			DRM_INFO("Using generic clock info\n");

			if (rdev->flags & RADEON_IS_IGP) {
				p1pll->reference_freq = 1432;
				p2pll->reference_freq = 1432;
				spll->reference_freq = 1432;
				mpll->reference_freq = 1432;
			} else {
				p1pll->reference_freq = 2700;
				p2pll->reference_freq = 2700;
				spll->reference_freq = 2700;
				mpll->reference_freq = 2700;
			}
			p1pll->reference_div =
			    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
			if (p1pll->reference_div < 2)
				p1pll->reference_div = 12;
			p2pll->reference_div = p1pll->reference_div;

			if (rdev->family >= CHIP_R420) {
				p1pll->pll_in_min = 100;
				p1pll->pll_in_max = 1350;
				p1pll->pll_out_min = 20000;
				p1pll->pll_out_max = 50000;
				p2pll->pll_in_min = 100;
				p2pll->pll_in_max = 1350;
				p2pll->pll_out_min = 20000;
				p2pll->pll_out_max = 50000;
			} else {
				p1pll->pll_in_min = 40;
				p1pll->pll_in_max = 500;
				p1pll->pll_out_min = 12500;
				p1pll->pll_out_max = 35000;
				p2pll->pll_in_min = 40;
				p2pll->pll_in_max = 500;
				p2pll->pll_out_min = 12500;
				p2pll->pll_out_max = 35000;
			}

			spll->reference_div =
			    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
			    RADEON_M_SPLL_REF_DIV_MASK;
			mpll->reference_div = spll->reference_div;
			rdev->clock.default_sclk =
			    radeon_legacy_get_engine_clock(rdev);
			rdev->clock.default_mclk =
			    radeon_legacy_get_memory_clock(rdev);
		}
	}

	/* pixel clocks */
	if (ASIC_IS_AVIVO(rdev)) {
		p1pll->min_post_div = 2;
		p1pll->max_post_div = 0x7f;
		p1pll->min_frac_feedback_div = 0;
		p1pll->max_frac_feedback_div = 9;
		p2pll->min_post_div = 2;
		p2pll->max_post_div = 0x7f;
		p2pll->min_frac_feedback_div = 0;
		p2pll->max_frac_feedback_div = 9;
	} else {
		p1pll->min_post_div = 1;
		p1pll->max_post_div = 16;
		p1pll->min_frac_feedback_div = 0;
		p1pll->max_frac_feedback_div = 0;
		p2pll->min_post_div = 1;
		p2pll->max_post_div = 12;
		p2pll->min_frac_feedback_div = 0;
		p2pll->max_frac_feedback_div = 0;
	}

	p1pll->min_ref_div = 2;
	p1pll->max_ref_div = 0x3ff;
	p1pll->min_feedback_div = 4;
	p1pll->max_feedback_div = 0x7ff;
	p1pll->best_vco = 0;

	p2pll->min_ref_div = 2;
	p2pll->max_ref_div = 0x3ff;
	p2pll->min_feedback_div = 4;
	p2pll->max_feedback_div = 0x7ff;
	p2pll->best_vco = 0;

	/* system clock */
	spll->min_post_div = 1;
	spll->max_post_div = 1;
	spll->min_ref_div = 2;
	spll->max_ref_div = 0xff;
	spll->min_feedback_div = 4;
	spll->max_feedback_div = 0xff;
	spll->best_vco = 0;

	/* memory clock */
	mpll->min_post_div = 1;
	mpll->max_post_div = 1;
	mpll->min_ref_div = 2;
	mpll->max_ref_div = 0xff;
	mpll->min_feedback_div = 4;
	mpll->max_feedback_div = 0xff;
	mpll->best_vco = 0;
}

/* Target clock given and returned in units of 10 kHz */
static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
				   uint32_t req_clock,
				   int *fb_div, int *post_div)
{
	struct radeon_pll *spll = &rdev->clock.spll;
	int ref_div = spll->reference_div;

	if (!ref_div)
		ref_div =
		    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
		    RADEON_M_SPLL_REF_DIV_MASK;

	if (req_clock < 15000) {
		*post_div = 8;
		req_clock *= 8;
	} else if (req_clock < 30000) {
		*post_div = 4;
		req_clock *= 4;
	} else if (req_clock < 60000) {
		*post_div = 2;
		req_clock *= 2;
	} else
		*post_div = 1;

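	/* Round to the nearest feedback divider:
	 * (req_clock * ref_div + reference_freq) / (2 * reference_freq). */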
	req_clock *= ref_div;
	req_clock += spll->reference_freq;
	req_clock /= (2 * spll->reference_freq);

	*fb_div = req_clock & 0xff;

	req_clock = (req_clock & 0xffff) << 1;
	req_clock *= spll->reference_freq;
	req_clock /= ref_div;
	req_clock /= *post_div;

	return req_clock;
}

/* Engine clock given in units of 10 kHz */
void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
				    uint32_t eng_clock)
{
	uint32_t tmp;
	int fb_div, post_div;

	/* XXX: wait for idle */

	eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);

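	/* Run from the crystal while the SPLL is put to sleep, reset and
	 * reprogrammed below. */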
	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
	tmp &= ~RADEON_DONT_USE_XTALIN;
	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);

	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
	WREG32_PLL(RADEON_SCLK_CNTL, tmp);

	udelay(10);

	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp |= RADEON_SPLL_SLEEP;
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	udelay(2);

	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp |= RADEON_SPLL_RESET;
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	udelay(200);

	tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
	tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
	tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
	WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);

	/* XXX: verify on different asics */
	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp &= ~RADEON_SPLL_PVG_MASK;
	if ((eng_clock * post_div) >= 90000)
		tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
	else
		tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp &= ~RADEON_SPLL_SLEEP;
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	udelay(2);

	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp &= ~RADEON_SPLL_RESET;
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	udelay(200);

	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
	switch (post_div) {
	case 1:
	default:
		tmp |= 1;
		break;
	case 2:
		tmp |= 2;
		break;
	case 4:
		tmp |= 3;
		break;
	case 8:
		tmp |= 4;
		break;
	}
	WREG32_PLL(RADEON_SCLK_CNTL, tmp);

	udelay(20);

	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
	tmp |= RADEON_DONT_USE_XTALIN;
	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);

	udelay(10);
}

void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
{
	uint32_t tmp;

	if (enable) {
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
			if ((RREG32(RADEON_CONFIG_CNTL) &
			     RADEON_CFG_ATI_REV_ID_MASK) >
			    RADEON_CFG_ATI_REV_A13) {
				tmp &= ~(RADEON_SCLK_FORCE_CP |
					 RADEON_SCLK_FORCE_RB);
			}
			tmp &= ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
				 RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
				 RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
				 RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
				 RADEON_SCLK_FORCE_TDM);
			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
		} else if (ASIC_IS_R300(rdev)) {
			if ((rdev->family == CHIP_RS400) ||
			    (rdev->family == CHIP_RS480)) {
				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
				tmp &= ~(RADEON_SCLK_FORCE_DISP2 |
					 RADEON_SCLK_FORCE_CP |
					 RADEON_SCLK_FORCE_HDP |
					 RADEON_SCLK_FORCE_DISP1 |
					 RADEON_SCLK_FORCE_TOP |
					 RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
					 | RADEON_SCLK_FORCE_IDCT |
					 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
					 | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
					 | R300_SCLK_FORCE_US |
					 RADEON_SCLK_FORCE_TV_SCLK |
					 R300_SCLK_FORCE_SU |
					 RADEON_SCLK_FORCE_OV0);
				tmp |= RADEON_DYN_STOP_LAT_MASK;
				tmp |= RADEON_SCLK_FORCE_TOP |
				       RADEON_SCLK_FORCE_VIP;
				WREG32_PLL(RADEON_SCLK_CNTL, tmp);

				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
				tmp &= ~RADEON_SCLK_MORE_FORCEON;
				tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);

				tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
					RADEON_PIXCLK_DAC_ALWAYS_ONb);
				WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);

				tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
					RADEON_PIX2CLK_DAC_ALWAYS_ONb |
					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
					R300_DVOCLK_ALWAYS_ONb |
					RADEON_PIXCLK_BLEND_ALWAYS_ONb |
					RADEON_PIXCLK_GV_ALWAYS_ONb |
					R300_PIXCLK_DVO_ALWAYS_ONb |
					RADEON_PIXCLK_LVDS_ALWAYS_ONb |
					RADEON_PIXCLK_TMDS_ALWAYS_ONb |
					R300_PIXCLK_TRANS_ALWAYS_ONb |
					R300_PIXCLK_TVO_ALWAYS_ONb |
					R300_P2G2CLK_ALWAYS_ONb |
					R300_P2G2CLK_ALWAYS_ONb);
				WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
			} else if (rdev->family >= CHIP_RV350) {
				tmp = RREG32_PLL(R300_SCLK_CNTL2);
				tmp &= ~(R300_SCLK_FORCE_TCL |
					 R300_SCLK_FORCE_GA |
					 R300_SCLK_FORCE_CBA);
				tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
					R300_SCLK_GA_MAX_DYN_STOP_LAT |
					R300_SCLK_CBA_MAX_DYN_STOP_LAT);
				WREG32_PLL(R300_SCLK_CNTL2, tmp);

				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
				tmp &= ~(RADEON_SCLK_FORCE_DISP2 |
					 RADEON_SCLK_FORCE_CP |
					 RADEON_SCLK_FORCE_HDP |
					 RADEON_SCLK_FORCE_DISP1 |
					 RADEON_SCLK_FORCE_TOP |
					 RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
					 | RADEON_SCLK_FORCE_IDCT |
					 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
					 | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
					 | R300_SCLK_FORCE_US |
					 RADEON_SCLK_FORCE_TV_SCLK |
					 R300_SCLK_FORCE_SU |
					 RADEON_SCLK_FORCE_OV0);
				tmp |= RADEON_DYN_STOP_LAT_MASK;
				WREG32_PLL(RADEON_SCLK_CNTL, tmp);

				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
				tmp &= ~RADEON_SCLK_MORE_FORCEON;
				tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);

				tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
					RADEON_PIXCLK_DAC_ALWAYS_ONb);
				WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);

				tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
					RADEON_PIX2CLK_DAC_ALWAYS_ONb |
					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
					R300_DVOCLK_ALWAYS_ONb |
					RADEON_PIXCLK_BLEND_ALWAYS_ONb |
					RADEON_PIXCLK_GV_ALWAYS_ONb |
					R300_PIXCLK_DVO_ALWAYS_ONb |
					RADEON_PIXCLK_LVDS_ALWAYS_ONb |
					RADEON_PIXCLK_TMDS_ALWAYS_ONb |
					R300_PIXCLK_TRANS_ALWAYS_ONb |
					R300_PIXCLK_TVO_ALWAYS_ONb |
					R300_P2G2CLK_ALWAYS_ONb |
					R300_P2G2CLK_ALWAYS_ONb);
				WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);

				tmp = RREG32_PLL(RADEON_MCLK_MISC);
				tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
					RADEON_IO_MCLK_DYN_ENABLE);
				WREG32_PLL(RADEON_MCLK_MISC, tmp);

				tmp = RREG32_PLL(RADEON_MCLK_CNTL);
				tmp |= (RADEON_FORCEON_MCLKA |
					RADEON_FORCEON_MCLKB);

				tmp &= ~(RADEON_FORCEON_YCLKA |
					 RADEON_FORCEON_YCLKB |
					 RADEON_FORCEON_MC);

				/* Some VBIOS releases set the DISABLE_MC_MCLKA
				   and DISABLE_MC_MCLKB bits in the VBIOS table;
				   with dynamic clocking enabled, those bits hang
				   the hardware when video memory is read. */
				if ((tmp & R300_DISABLE_MC_MCLKA) &&
				    (tmp & R300_DISABLE_MC_MCLKB)) {
					/* If both bits are set, then check the active channels */
					tmp = RREG32_PLL(RADEON_MCLK_CNTL);
					if (rdev->mc.vram_width == 64) {
						if (RREG32(RADEON_MEM_CNTL) &
						    R300_MEM_USE_CD_CH_ONLY)
							tmp &= ~R300_DISABLE_MC_MCLKB;
						else
							tmp &= ~R300_DISABLE_MC_MCLKA;
					} else {
						tmp &= ~(R300_DISABLE_MC_MCLKA |
							 R300_DISABLE_MC_MCLKB);
					}
				}

				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
			} else {
				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
				tmp &= ~(R300_SCLK_FORCE_VAP);
				tmp |= RADEON_SCLK_FORCE_CP;
				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
				udelay(15000);

				tmp = RREG32_PLL(R300_SCLK_CNTL2);
				tmp &= ~(R300_SCLK_FORCE_TCL |
					 R300_SCLK_FORCE_GA |
					 R300_SCLK_FORCE_CBA);
				WREG32_PLL(R300_SCLK_CNTL2, tmp);
			}
		} else {
			tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);

			tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
				 RADEON_DISP_DYN_STOP_LAT_MASK |
				 RADEON_DYN_STOP_MODE_MASK);

			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
				(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
			WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
			udelay(15000);

			tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
			tmp |= RADEON_SCLK_DYN_START_CNTL;
			WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
			udelay(15000);

			/* When DRI is enabled, setting DYN_STOP_LAT to zero can
			   cause some R200 chips to lock up randomly; leave it as
			   set by the BIOS. */
			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
			/*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
			tmp &= ~RADEON_SCLK_FORCEON_MASK;

			/*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
			if (((rdev->family == CHIP_RV250) &&
			     ((RREG32(RADEON_CONFIG_CNTL) &
			       RADEON_CFG_ATI_REV_ID_MASK) <
			      RADEON_CFG_ATI_REV_A13))
			    || ((rdev->family == CHIP_RV100) &&
				((RREG32(RADEON_CONFIG_CNTL) &
				  RADEON_CFG_ATI_REV_ID_MASK) <=
				 RADEON_CFG_ATI_REV_A13))) {
				tmp |= RADEON_SCLK_FORCE_CP;
				tmp |= RADEON_SCLK_FORCE_VIP;
			}

			WREG32_PLL(RADEON_SCLK_CNTL, tmp);

			if ((rdev->family == CHIP_RV200) ||
			    (rdev->family == CHIP_RV250) ||
			    (rdev->family == CHIP_RV280)) {
				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
				tmp &= ~RADEON_SCLK_MORE_FORCEON;

				/* RV200::A11 A12 RV250::A11 A12 */
				if (((rdev->family == CHIP_RV200) ||
				     (rdev->family == CHIP_RV250)) &&
				    ((RREG32(RADEON_CONFIG_CNTL) &
				      RADEON_CFG_ATI_REV_ID_MASK) <
				     RADEON_CFG_ATI_REV_A13)) {
					tmp |= RADEON_SCLK_MORE_FORCEON;
				}
				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
				udelay(15000);
			}

			/* RV200::A11 A12, RV250::A11 A12 */
			if (((rdev->family == CHIP_RV200) ||
			     (rdev->family == CHIP_RV250)) &&
			    ((RREG32(RADEON_CONFIG_CNTL) &
			      RADEON_CFG_ATI_REV_ID_MASK) <
			     RADEON_CFG_ATI_REV_A13)) {
				tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
				tmp |= RADEON_TCL_BYPASS_DISABLE;
				WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
			}
			udelay(15000);

			/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
			tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
				RADEON_PIX2CLK_DAC_ALWAYS_ONb |
				RADEON_PIXCLK_BLEND_ALWAYS_ONb |
				RADEON_PIXCLK_GV_ALWAYS_ONb |
				RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
				RADEON_PIXCLK_LVDS_ALWAYS_ONb |
				RADEON_PIXCLK_TMDS_ALWAYS_ONb);

			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
			udelay(15000);

			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
			tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
				RADEON_PIXCLK_DAC_ALWAYS_ONb);

			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
|
||||
udelay(15000);
|
||||
}
|
||||
} else {
|
||||
/* Turn everything OFF (ForceON to everything) */
|
||||
if (rdev->flags & RADEON_SINGLE_CRTC) {
|
||||
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
|
||||
tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
|
||||
RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
|
||||
| RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
|
||||
RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
|
||||
RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
|
||||
RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
|
||||
RADEON_SCLK_FORCE_RB);
|
||||
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
|
||||
} else if ((rdev->family == CHIP_RS400) ||
|
||||
(rdev->family == CHIP_RS480)) {
|
||||
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
|
||||
tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
|
||||
RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
|
||||
| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
|
||||
R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
|
||||
RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
|
||||
R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
|
||||
R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
|
||||
R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
|
||||
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
|
||||
tmp |= RADEON_SCLK_MORE_FORCEON;
|
||||
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
|
||||
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_DAC_ALWAYS_ONb |
|
||||
R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
|
||||
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
|
||||
tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
|
||||
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
|
||||
RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
|
||||
R300_DVOCLK_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_GV_ALWAYS_ONb |
|
||||
R300_PIXCLK_DVO_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_TMDS_ALWAYS_ONb |
|
||||
R300_PIXCLK_TRANS_ALWAYS_ONb |
|
||||
R300_PIXCLK_TVO_ALWAYS_ONb |
|
||||
R300_P2G2CLK_ALWAYS_ONb |
|
||||
R300_P2G2CLK_ALWAYS_ONb |
|
||||
R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
|
||||
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
|
||||
} else if (rdev->family >= CHIP_RV350) {
|
||||
/* for RV350/M10, no delays are required. */
|
||||
tmp = RREG32_PLL(R300_SCLK_CNTL2);
|
||||
tmp |= (R300_SCLK_FORCE_TCL |
|
||||
R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
|
||||
WREG32_PLL(R300_SCLK_CNTL2, tmp);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
|
||||
tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
|
||||
RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
|
||||
| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
|
||||
R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
|
||||
RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
|
||||
R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
|
||||
R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
|
||||
R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
|
||||
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
|
||||
tmp |= RADEON_SCLK_MORE_FORCEON;
|
||||
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_MCLK_CNTL);
|
||||
tmp |= (RADEON_FORCEON_MCLKA |
|
||||
RADEON_FORCEON_MCLKB |
|
||||
RADEON_FORCEON_YCLKA |
|
||||
RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
|
||||
WREG32_PLL(RADEON_MCLK_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
|
||||
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_DAC_ALWAYS_ONb |
|
||||
R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
|
||||
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
|
||||
tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
|
||||
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
|
||||
RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
|
||||
R300_DVOCLK_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_GV_ALWAYS_ONb |
|
||||
R300_PIXCLK_DVO_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_TMDS_ALWAYS_ONb |
|
||||
R300_PIXCLK_TRANS_ALWAYS_ONb |
|
||||
R300_PIXCLK_TVO_ALWAYS_ONb |
|
||||
R300_P2G2CLK_ALWAYS_ONb |
|
||||
R300_P2G2CLK_ALWAYS_ONb |
|
||||
R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
|
||||
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
|
||||
} else {
|
||||
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
|
||||
tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
|
||||
tmp |= RADEON_SCLK_FORCE_SE;
|
||||
|
||||
if (rdev->flags & RADEON_SINGLE_CRTC) {
|
||||
tmp |= (RADEON_SCLK_FORCE_RB |
|
||||
RADEON_SCLK_FORCE_TDM |
|
||||
RADEON_SCLK_FORCE_TAM |
|
||||
RADEON_SCLK_FORCE_PB |
|
||||
RADEON_SCLK_FORCE_RE |
|
||||
RADEON_SCLK_FORCE_VIP |
|
||||
RADEON_SCLK_FORCE_IDCT |
|
||||
RADEON_SCLK_FORCE_TOP |
|
||||
RADEON_SCLK_FORCE_DISP1 |
|
||||
RADEON_SCLK_FORCE_DISP2 |
|
||||
RADEON_SCLK_FORCE_HDP);
|
||||
} else if ((rdev->family == CHIP_R300) ||
|
||||
(rdev->family == CHIP_R350)) {
|
||||
tmp |= (RADEON_SCLK_FORCE_HDP |
|
||||
RADEON_SCLK_FORCE_DISP1 |
|
||||
RADEON_SCLK_FORCE_DISP2 |
|
||||
RADEON_SCLK_FORCE_TOP |
|
||||
RADEON_SCLK_FORCE_IDCT |
|
||||
RADEON_SCLK_FORCE_VIP);
|
||||
}
|
||||
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
|
||||
|
||||
udelay(16000);
|
||||
|
||||
if ((rdev->family == CHIP_R300) ||
|
||||
(rdev->family == CHIP_R350)) {
|
||||
tmp = RREG32_PLL(R300_SCLK_CNTL2);
|
||||
tmp |= (R300_SCLK_FORCE_TCL |
|
||||
R300_SCLK_FORCE_GA |
|
||||
R300_SCLK_FORCE_CBA);
|
||||
WREG32_PLL(R300_SCLK_CNTL2, tmp);
|
||||
udelay(16000);
|
||||
}
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
tmp = RREG32_PLL(RADEON_MCLK_CNTL);
|
||||
tmp &= ~(RADEON_FORCEON_MCLKA |
|
||||
RADEON_FORCEON_YCLKA);
|
||||
WREG32_PLL(RADEON_MCLK_CNTL, tmp);
|
||||
udelay(16000);
|
||||
}
|
||||
|
||||
if ((rdev->family == CHIP_RV200) ||
|
||||
(rdev->family == CHIP_RV250) ||
|
||||
(rdev->family == CHIP_RV280)) {
|
||||
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
|
||||
tmp |= RADEON_SCLK_MORE_FORCEON;
|
||||
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
|
||||
udelay(16000);
|
||||
}
|
||||
|
||||
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
|
||||
tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
|
||||
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_GV_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
|
||||
|
||||
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
|
||||
udelay(16000);
|
||||
|
||||
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
|
||||
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
|
||||
RADEON_PIXCLK_DAC_ALWAYS_ONb);
|
||||
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void radeon_apply_clock_quirks(struct radeon_device *rdev)
|
||||
{
|
||||
uint32_t tmp;
|
||||
|
||||
/* XXX make sure engine is idle */
|
||||
|
||||
if (rdev->family < CHIP_RS600) {
|
||||
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
|
||||
if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
|
||||
tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
|
||||
if ((rdev->family == CHIP_RV250)
|
||||
|| (rdev->family == CHIP_RV280))
|
||||
tmp |=
|
||||
RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
|
||||
if ((rdev->family == CHIP_RV350)
|
||||
|| (rdev->family == CHIP_RV380))
|
||||
tmp |= R300_SCLK_FORCE_VAP;
|
||||
if (rdev->family == CHIP_R420)
|
||||
tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
|
||||
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
|
||||
} else if (rdev->family < CHIP_R600) {
|
||||
tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
|
||||
tmp |= AVIVO_CP_FORCEON;
|
||||
WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
|
||||
tmp |= AVIVO_E2_FORCEON;
|
||||
WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
|
||||
|
||||
tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
|
||||
tmp |= AVIVO_IDCT_FORCEON;
|
||||
WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
int radeon_static_clocks_init(struct drm_device *dev)
|
||||
{
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
/* XXX make sure engine is idle */
|
||||
|
||||
if (radeon_dynclks != -1) {
|
||||
if (radeon_dynclks)
|
||||
radeon_set_clock_gating(rdev, 1);
|
||||
}
|
||||
radeon_apply_clock_quirks(rdev);
|
||||
return 0;
|
||||
}
|
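Note: every block in the hunk above is the same read-modify-write cycle on a PLL register, done by hand each time. A minimal sketch of a helper that could express one step per call; radeon_pll_rmw is hypothetical and not part of this patch, while RREG32_PLL/WREG32_PLL are the accessors the hunk itself uses:

/* Hypothetical helper (not in this patch): one read-modify-write
 * cycle on a PLL register, as repeated throughout the hunk above. */
static inline void radeon_pll_rmw(struct radeon_device *rdev,
                                  uint32_t reg, uint32_t clr, uint32_t set)
{
    uint32_t tmp = RREG32_PLL(reg);

    tmp &= ~clr;    /* drop the bits being cleared */
    tmp |= set;     /* raise the bits being set */
    WREG32_PLL(reg, tmp);
}

/* e.g. the SCLK_MORE_CNTL step above would read:
 * radeon_pll_rmw(rdev, RADEON_SCLK_MORE_CNTL,
 *                RADEON_SCLK_MORE_FORCEON,
 *                RADEON_SCLK_MORE_MAX_DYN_STOP_LAT);
 */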
[File diff suppressed because it is too large]
@ -0,0 +1,603 @@
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"

extern void
radeon_combios_connected_scratch_regs(struct drm_connector *connector,
                                      struct drm_encoder *encoder,
                                      bool connected);
extern void
radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
                                       struct drm_encoder *encoder,
                                       bool connected);

static void
radeon_connector_update_scratch_regs(struct drm_connector *connector,
                                     enum drm_connector_status status)
{
    struct drm_device *dev = connector->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct drm_encoder *best_encoder = NULL;
    struct drm_encoder *encoder = NULL;
    struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
    struct drm_mode_object *obj;
    bool connected;
    int i;

    best_encoder = connector_funcs->best_encoder(connector);

    for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
        if (connector->encoder_ids[i] == 0)
            break;

        obj = drm_mode_object_find(connector->dev,
                                   connector->encoder_ids[i],
                                   DRM_MODE_OBJECT_ENCODER);
        if (!obj)
            continue;

        encoder = obj_to_encoder(obj);

        if ((encoder == best_encoder) && (status == connector_status_connected))
            connected = true;
        else
            connected = false;

        if (rdev->is_atom_bios)
            radeon_atombios_connected_scratch_regs(connector, encoder, connected);
        else
            radeon_combios_connected_scratch_regs(connector, encoder, connected);
    }
}

struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
    int enc_id = connector->encoder_ids[0];
    struct drm_mode_object *obj;
    struct drm_encoder *encoder;

    /* pick the first encoder id */
    if (enc_id) {
        obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
        if (!obj)
            return NULL;
        encoder = obj_to_encoder(obj);
        return encoder;
    }
    return NULL;
}

static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
{
    struct drm_device *dev = encoder->dev;
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    struct drm_display_mode *mode = NULL;
    struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;

    if (native_mode->panel_xres != 0 &&
        native_mode->panel_yres != 0 &&
        native_mode->dotclock != 0) {
        mode = drm_mode_create(dev);

        mode->hdisplay = native_mode->panel_xres;
        mode->vdisplay = native_mode->panel_yres;

        mode->htotal = mode->hdisplay + native_mode->hblank;
        mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
        mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
        mode->vtotal = mode->vdisplay + native_mode->vblank;
        mode->vsync_start = mode->vdisplay + native_mode->voverplus;
        mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
        mode->clock = native_mode->dotclock;
        mode->flags = 0;

        mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
        drm_mode_set_name(mode);

        DRM_DEBUG("Adding native panel mode %s\n", mode->name);
    }
    return mode;
}

int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
                                  uint64_t val)
{
    return 0;
}

static int radeon_lvds_get_modes(struct drm_connector *connector)
{
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
    struct drm_encoder *encoder;
    int ret = 0;
    struct drm_display_mode *mode;

    if (radeon_connector->ddc_bus) {
        ret = radeon_ddc_get_modes(radeon_connector);
        if (ret > 0) {
            return ret;
        }
    }

    encoder = radeon_best_single_encoder(connector);
    if (!encoder)
        return 0;

    /* we have no EDID modes */
    mode = radeon_fp_native_mode(encoder);
    if (mode) {
        ret = 1;
        drm_mode_probed_add(connector, mode);
    }
    return ret;
}

static int radeon_lvds_mode_valid(struct drm_connector *connector,
                                  struct drm_display_mode *mode)
{
    return MODE_OK;
}

static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
{
    enum drm_connector_status ret = connector_status_connected;
    /* check acpi lid status ??? */
    radeon_connector_update_scratch_regs(connector, ret);
    return ret;
}

static void radeon_connector_destroy(struct drm_connector *connector)
{
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);

    if (radeon_connector->ddc_bus)
        radeon_i2c_destroy(radeon_connector->ddc_bus);
    kfree(radeon_connector->con_priv);
    drm_sysfs_connector_remove(connector);
    drm_connector_cleanup(connector);
    kfree(connector);
}

struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
    .get_modes = radeon_lvds_get_modes,
    .mode_valid = radeon_lvds_mode_valid,
    .best_encoder = radeon_best_single_encoder,
};

struct drm_connector_funcs radeon_lvds_connector_funcs = {
    .dpms = drm_helper_connector_dpms,
    .detect = radeon_lvds_detect,
    .fill_modes = drm_helper_probe_single_connector_modes,
    .destroy = radeon_connector_destroy,
    .set_property = radeon_connector_set_property,
};

static int radeon_vga_get_modes(struct drm_connector *connector)
{
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
    int ret;

    ret = radeon_ddc_get_modes(radeon_connector);

    return ret;
}

static int radeon_vga_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
{
    return MODE_OK;
}

static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
{
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
    struct drm_encoder *encoder;
    struct drm_encoder_helper_funcs *encoder_funcs;
    bool dret;
    enum drm_connector_status ret = connector_status_disconnected;

    radeon_i2c_do_lock(radeon_connector, 1);
    dret = radeon_ddc_probe(radeon_connector);
    radeon_i2c_do_lock(radeon_connector, 0);
    if (dret)
        ret = connector_status_connected;
    else {
        /* if EDID probing fails, try a load detect */
        encoder = radeon_best_single_encoder(connector);
        if (!encoder)
            ret = connector_status_disconnected;
        else {
            encoder_funcs = encoder->helper_private;
            ret = encoder_funcs->detect(encoder, connector);
        }
    }

    radeon_connector_update_scratch_regs(connector, ret);
    return ret;
}

struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
    .get_modes = radeon_vga_get_modes,
    .mode_valid = radeon_vga_mode_valid,
    .best_encoder = radeon_best_single_encoder,
};

struct drm_connector_funcs radeon_vga_connector_funcs = {
    .dpms = drm_helper_connector_dpms,
    .detect = radeon_vga_detect,
    .fill_modes = drm_helper_probe_single_connector_modes,
    .destroy = radeon_connector_destroy,
    .set_property = radeon_connector_set_property,
};

static int radeon_dvi_get_modes(struct drm_connector *connector)
{
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
    int ret;

    ret = radeon_ddc_get_modes(radeon_connector);
    /* reset scratch regs here since radeon_dvi_detect doesn't check the digital bit */
    radeon_connector_update_scratch_regs(connector, connector_status_connected);
    return ret;
}

static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
{
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
    struct drm_encoder *encoder;
    struct drm_encoder_helper_funcs *encoder_funcs;
    struct drm_mode_object *obj;
    int i;
    enum drm_connector_status ret = connector_status_disconnected;
    bool dret;

    radeon_i2c_do_lock(radeon_connector, 1);
    dret = radeon_ddc_probe(radeon_connector);
    radeon_i2c_do_lock(radeon_connector, 0);
    if (dret)
        ret = connector_status_connected;
    else {
        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
            if (connector->encoder_ids[i] == 0)
                break;

            obj = drm_mode_object_find(connector->dev,
                                       connector->encoder_ids[i],
                                       DRM_MODE_OBJECT_ENCODER);
            if (!obj)
                continue;

            encoder = obj_to_encoder(obj);

            encoder_funcs = encoder->helper_private;
            if (encoder_funcs->detect) {
                ret = encoder_funcs->detect(encoder, connector);
                if (ret == connector_status_connected) {
                    radeon_connector->use_digital = 0;
                    break;
                }
            }
        }
    }

    /* updated in get_modes as well since we need to know if it's analog or digital */
    radeon_connector_update_scratch_regs(connector, ret);
    return ret;
}

/* okay, we need to be smart in here about which encoder to pick */
struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
    int enc_id = connector->encoder_ids[0];
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
    struct drm_mode_object *obj;
    struct drm_encoder *encoder;
    int i;

    for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
        if (connector->encoder_ids[i] == 0)
            break;

        obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
        if (!obj)
            continue;

        encoder = obj_to_encoder(obj);

        if (radeon_connector->use_digital) {
            if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
                return encoder;
        } else {
            if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
                encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
                return encoder;
        }
    }

    /* see if we have a default encoder  TODO */

    /* then check use digital */
    /* pick the first one */
    if (enc_id) {
        obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
        if (!obj)
            return NULL;
        encoder = obj_to_encoder(obj);
        return encoder;
    }
    return NULL;
}

struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
    .get_modes = radeon_dvi_get_modes,
    .mode_valid = radeon_vga_mode_valid,
    .best_encoder = radeon_dvi_encoder,
};

struct drm_connector_funcs radeon_dvi_connector_funcs = {
    .dpms = drm_helper_connector_dpms,
    .detect = radeon_dvi_detect,
    .fill_modes = drm_helper_probe_single_connector_modes,
    .set_property = radeon_connector_set_property,
    .destroy = radeon_connector_destroy,
};

void
radeon_add_atom_connector(struct drm_device *dev,
                          uint32_t connector_id,
                          uint32_t supported_device,
                          int connector_type,
                          struct radeon_i2c_bus_rec *i2c_bus,
                          bool linkb,
                          uint32_t igp_lane_info)
{
    struct drm_connector *connector;
    struct radeon_connector *radeon_connector;
    struct radeon_connector_atom_dig *radeon_dig_connector;
    uint32_t subpixel_order = SubPixelNone;

    /* fixme - tv/cv/din */
    if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
        (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
        (connector_type == DRM_MODE_CONNECTOR_Composite) ||
        (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
        return;

    /* see if we already added it */
    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        radeon_connector = to_radeon_connector(connector);
        if (radeon_connector->connector_id == connector_id) {
            radeon_connector->devices |= supported_device;
            return;
        }
    }

    radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
    if (!radeon_connector)
        return;

    connector = &radeon_connector->base;

    radeon_connector->connector_id = connector_id;
    radeon_connector->devices = supported_device;
    switch (connector_type) {
    case DRM_MODE_CONNECTOR_VGA:
        drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        break;
    case DRM_MODE_CONNECTOR_DVIA:
        drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        break;
    case DRM_MODE_CONNECTOR_DVII:
    case DRM_MODE_CONNECTOR_DVID:
        radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
        if (!radeon_dig_connector)
            goto failed;
        radeon_dig_connector->linkb = linkb;
        radeon_dig_connector->igp_lane_info = igp_lane_info;
        radeon_connector->con_priv = radeon_dig_connector;
        drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        subpixel_order = SubPixelHorizontalRGB;
        break;
    case DRM_MODE_CONNECTOR_HDMIA:
    case DRM_MODE_CONNECTOR_HDMIB:
        radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
        if (!radeon_dig_connector)
            goto failed;
        radeon_dig_connector->linkb = linkb;
        radeon_dig_connector->igp_lane_info = igp_lane_info;
        radeon_connector->con_priv = radeon_dig_connector;
        drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        subpixel_order = SubPixelHorizontalRGB;
        break;
    case DRM_MODE_CONNECTOR_DisplayPort:
        radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
        if (!radeon_dig_connector)
            goto failed;
        radeon_dig_connector->linkb = linkb;
        radeon_dig_connector->igp_lane_info = igp_lane_info;
        radeon_connector->con_priv = radeon_dig_connector;
        drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        subpixel_order = SubPixelHorizontalRGB;
        break;
    case DRM_MODE_CONNECTOR_SVIDEO:
    case DRM_MODE_CONNECTOR_Composite:
    case DRM_MODE_CONNECTOR_9PinDIN:
        break;
    case DRM_MODE_CONNECTOR_LVDS:
        radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
        if (!radeon_dig_connector)
            goto failed;
        radeon_dig_connector->linkb = linkb;
        radeon_dig_connector->igp_lane_info = igp_lane_info;
        radeon_connector->con_priv = radeon_dig_connector;
        drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        subpixel_order = SubPixelHorizontalRGB;
        break;
    }

    connector->display_info.subpixel_order = subpixel_order;
    drm_sysfs_connector_add(connector);
    return;

failed:
    if (radeon_connector->ddc_bus)
        radeon_i2c_destroy(radeon_connector->ddc_bus);
    drm_connector_cleanup(connector);
    kfree(connector);
}

void
radeon_add_legacy_connector(struct drm_device *dev,
                            uint32_t connector_id,
                            uint32_t supported_device,
                            int connector_type,
                            struct radeon_i2c_bus_rec *i2c_bus)
{
    struct drm_connector *connector;
    struct radeon_connector *radeon_connector;
    uint32_t subpixel_order = SubPixelNone;

    /* fixme - tv/cv/din */
    if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
        (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
        (connector_type == DRM_MODE_CONNECTOR_Composite) ||
        (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
        return;

    /* see if we already added it */
    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        radeon_connector = to_radeon_connector(connector);
        if (radeon_connector->connector_id == connector_id) {
            radeon_connector->devices |= supported_device;
            return;
        }
    }

    radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
    if (!radeon_connector)
        return;

    connector = &radeon_connector->base;

    radeon_connector->connector_id = connector_id;
    radeon_connector->devices = supported_device;
    switch (connector_type) {
    case DRM_MODE_CONNECTOR_VGA:
        drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        break;
    case DRM_MODE_CONNECTOR_DVIA:
        drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        break;
    case DRM_MODE_CONNECTOR_DVII:
    case DRM_MODE_CONNECTOR_DVID:
        drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        subpixel_order = SubPixelHorizontalRGB;
        break;
    case DRM_MODE_CONNECTOR_SVIDEO:
    case DRM_MODE_CONNECTOR_Composite:
    case DRM_MODE_CONNECTOR_9PinDIN:
        break;
    case DRM_MODE_CONNECTOR_LVDS:
        drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
        drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
        if (i2c_bus->valid) {
            radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
            if (!radeon_connector->ddc_bus)
                goto failed;
        }
        subpixel_order = SubPixelHorizontalRGB;
        break;
    }

    connector->display_info.subpixel_order = subpixel_order;
    drm_sysfs_connector_add(connector);
    return;

failed:
    if (radeon_connector->ddc_bus)
        radeon_i2c_destroy(radeon_connector->ddc_bus);
    drm_connector_cleanup(connector);
    kfree(connector);
}
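Note: every case in the two add-connector functions above repeats the same init/helper-add/DDC-create sequence, varying only the funcs tables and the DDC bus name. A sketch of how it could be factored out; radeon_connector_init_common is hypothetical and not in this patch:

/* Hypothetical refactor sketch (not in this patch): the shared tail of
 * each switch case above, with the caller keeping its 'failed' path. */
static int radeon_connector_init_common(struct drm_device *dev,
                                        struct radeon_connector *radeon_connector,
                                        struct drm_connector_funcs *funcs,
                                        struct drm_connector_helper_funcs *helper,
                                        int connector_type,
                                        struct radeon_i2c_bus_rec *i2c_bus,
                                        const char *bus_name)
{
    drm_connector_init(dev, &radeon_connector->base, funcs, connector_type);
    drm_connector_helper_add(&radeon_connector->base, helper);
    if (i2c_bus->valid) {
        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, bus_name);
        if (!radeon_connector->ddc_bus)
            return -ENOMEM;  /* caller takes its existing 'failed' path */
    }
    return 0;
}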
@ -0,0 +1,249 @@
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt);

int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
    struct drm_device *ddev = p->rdev->ddev;
    struct radeon_cs_chunk *chunk;
    unsigned i, j;
    bool duplicate;

    if (p->chunk_relocs_idx == -1) {
        return 0;
    }
    chunk = &p->chunks[p->chunk_relocs_idx];
    /* FIXME: we assume that each reloc uses 4 dwords */
    p->nrelocs = chunk->length_dw / 4;
    p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
    if (p->relocs_ptr == NULL) {
        return -ENOMEM;
    }
    p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
    if (p->relocs == NULL) {
        return -ENOMEM;
    }
    for (i = 0; i < p->nrelocs; i++) {
        struct drm_radeon_cs_reloc *r;

        duplicate = false;
        r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
        for (j = 0; j < p->nrelocs; j++) {
            if (r->handle == p->relocs[j].handle) {
                p->relocs_ptr[i] = &p->relocs[j];
                duplicate = true;
                break;
            }
        }
        if (!duplicate) {
            p->relocs[i].gobj = drm_gem_object_lookup(ddev,
                                                      p->filp,
                                                      r->handle);
            if (p->relocs[i].gobj == NULL) {
                DRM_ERROR("gem object lookup failed 0x%x\n",
                          r->handle);
                return -EINVAL;
            }
            p->relocs_ptr[i] = &p->relocs[i];
            p->relocs[i].robj = p->relocs[i].gobj->driver_private;
            p->relocs[i].lobj.robj = p->relocs[i].robj;
            p->relocs[i].lobj.rdomain = r->read_domains;
            p->relocs[i].lobj.wdomain = r->write_domain;
            p->relocs[i].handle = r->handle;
            p->relocs[i].flags = r->flags;
            INIT_LIST_HEAD(&p->relocs[i].lobj.list);
            radeon_object_list_add_object(&p->relocs[i].lobj,
                                          &p->validated);
        }
    }
    return radeon_object_list_validate(&p->validated, p->ib->fence);
}

int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
    struct drm_radeon_cs *cs = data;
    uint64_t *chunk_array_ptr;
    unsigned size, i;

    if (!cs->num_chunks) {
        return 0;
    }
    /* get chunks */
    INIT_LIST_HEAD(&p->validated);
    p->idx = 0;
    p->chunk_ib_idx = -1;
    p->chunk_relocs_idx = -1;
    p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
    if (p->chunks_array == NULL) {
        return -ENOMEM;
    }
    chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
    if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
                           sizeof(uint64_t)*cs->num_chunks)) {
        return -EFAULT;
    }
    p->nchunks = cs->num_chunks;
    p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
    if (p->chunks == NULL) {
        return -ENOMEM;
    }
    for (i = 0; i < p->nchunks; i++) {
        struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
        struct drm_radeon_cs_chunk user_chunk;
        uint32_t __user *cdata;

        chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
        if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
                               sizeof(struct drm_radeon_cs_chunk))) {
            return -EFAULT;
        }
        p->chunks[i].chunk_id = user_chunk.chunk_id;
        if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
            p->chunk_relocs_idx = i;
        }
        if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
            p->chunk_ib_idx = i;
        }
        p->chunks[i].length_dw = user_chunk.length_dw;
        cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;

        p->chunks[i].kdata = NULL;
        size = p->chunks[i].length_dw * sizeof(uint32_t);
        p->chunks[i].kdata = kzalloc(size, GFP_KERNEL);
        if (p->chunks[i].kdata == NULL) {
            return -ENOMEM;
        }
        if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
            return -EFAULT;
        }
    }
    if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
        DRM_ERROR("cs IB too big: %d\n",
                  p->chunks[p->chunk_ib_idx].length_dw);
        return -EINVAL;
    }
    return 0;
}

/**
 * cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error:  error number
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
    unsigned i;

    if (error) {
        radeon_object_list_unvalidate(&parser->validated);
    } else {
        radeon_object_list_clean(&parser->validated);
    }
    for (i = 0; i < parser->nrelocs; i++) {
        if (parser->relocs[i].gobj) {
            mutex_lock(&parser->rdev->ddev->struct_mutex);
            drm_gem_object_unreference(parser->relocs[i].gobj);
            mutex_unlock(&parser->rdev->ddev->struct_mutex);
        }
    }
    kfree(parser->relocs);
    kfree(parser->relocs_ptr);
    for (i = 0; i < parser->nchunks; i++) {
        kfree(parser->chunks[i].kdata);
    }
    kfree(parser->chunks);
    kfree(parser->chunks_array);
    radeon_ib_free(parser->rdev, &parser->ib);
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_cs_parser parser;
    struct radeon_cs_chunk *ib_chunk;
    int r;

    mutex_lock(&rdev->cs_mutex);
    if (rdev->gpu_lockup) {
        mutex_unlock(&rdev->cs_mutex);
        return -EINVAL;
    }
    /* initialize parser */
    memset(&parser, 0, sizeof(struct radeon_cs_parser));
    parser.filp = filp;
    parser.rdev = rdev;
    r = radeon_cs_parser_init(&parser, data);
    if (r) {
        DRM_ERROR("Failed to initialize parser!\n");
        radeon_cs_parser_fini(&parser, r);
        mutex_unlock(&rdev->cs_mutex);
        return r;
    }
    r = radeon_ib_get(rdev, &parser.ib);
    if (r) {
        DRM_ERROR("Failed to get IB!\n");
        radeon_cs_parser_fini(&parser, r);
        mutex_unlock(&rdev->cs_mutex);
        return r;
    }
    r = radeon_cs_parser_relocs(&parser);
    if (r) {
        DRM_ERROR("Failed to parse relocations!\n");
        radeon_cs_parser_fini(&parser, r);
        mutex_unlock(&rdev->cs_mutex);
        return r;
    }
    /* Copy the packet into the IB; the parser will read from the
     * input memory (cached) and write to the IB (which can be
     * uncached). */
    ib_chunk = &parser.chunks[parser.chunk_ib_idx];
    parser.ib->length_dw = ib_chunk->length_dw;
    memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
    r = radeon_cs_parse(&parser);
    if (r) {
        DRM_ERROR("Invalid command stream!\n");
        radeon_cs_parser_fini(&parser, r);
        mutex_unlock(&rdev->cs_mutex);
        return r;
    }
    r = radeon_ib_schedule(rdev, parser.ib);
    if (r) {
        DRM_ERROR("Failed to schedule IB!\n");
    }
    radeon_cs_parser_fini(&parser, r);
    mutex_unlock(&rdev->cs_mutex);
    return r;
}
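Note: for context, the payload parsed by radeon_cs_parser_init() above is a userspace-supplied array of pointers to chunk descriptors, each tagged with an id (RADEON_CHUNK_ID_IB or RADEON_CHUNK_ID_RELOCS) and a length in dwords. A userspace-side sketch, assuming a caller-owned ib_data buffer of ib_len_dw dwords (illustrative only, not part of this patch):

/* Illustrative userspace sketch: one IB chunk handed to the CS ioctl. */
struct drm_radeon_cs_chunk ib_chunk = {
    .chunk_id   = RADEON_CHUNK_ID_IB,
    .length_dw  = ib_len_dw,                        /* assumed variable */
    .chunk_data = (uint64_t)(unsigned long)ib_data, /* assumed buffer */
};
uint64_t chunk_array[1] = { (uint64_t)(unsigned long)&ib_chunk };
struct drm_radeon_cs cs = {
    .num_chunks = 1,
    .chunks     = (uint64_t)(unsigned long)chunk_array,
};
/* then, e.g. with libdrm:
 * drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
 */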
@ -0,0 +1,252 @@
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
{
    struct radeon_device *rdev = crtc->dev->dev_private;
    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
    uint32_t cur_lock;

    if (ASIC_IS_AVIVO(rdev)) {
        cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
        if (lock)
            cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
        else
            cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
        WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
    } else {
        cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
        if (lock)
            cur_lock |= RADEON_CUR_LOCK;
        else
            cur_lock &= ~RADEON_CUR_LOCK;
        WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
    }
}

static void radeon_hide_cursor(struct drm_crtc *crtc)
{
    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
    struct radeon_device *rdev = crtc->dev->dev_private;

    if (ASIC_IS_AVIVO(rdev)) {
        WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
        WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
    } else {
        switch (radeon_crtc->crtc_id) {
        case 0:
            WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
            break;
        case 1:
            WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
            break;
        default:
            return;
        }
        WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
    }
}

static void radeon_show_cursor(struct drm_crtc *crtc)
{
    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
    struct radeon_device *rdev = crtc->dev->dev_private;

    if (ASIC_IS_AVIVO(rdev)) {
        WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
        WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
               (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
    } else {
        switch (radeon_crtc->crtc_id) {
        case 0:
            WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
            break;
        case 1:
            WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
            break;
        default:
            return;
        }

        WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
                 (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
                 ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
    }
}

static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
                              uint32_t gpu_addr)
{
    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
    struct radeon_device *rdev = crtc->dev->dev_private;

    if (ASIC_IS_AVIVO(rdev))
        WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
    else
        /* offset is from DISP(2)_BASE_ADDRESS */
        WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr);
}

int radeon_crtc_cursor_set(struct drm_crtc *crtc,
                           struct drm_file *file_priv,
                           uint32_t handle,
                           uint32_t width,
                           uint32_t height)
{
    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
    struct drm_gem_object *obj;
    uint64_t gpu_addr;
    int ret;

    if (!handle) {
        /* turn off cursor */
        radeon_hide_cursor(crtc);
        obj = NULL;
        goto unpin;
    }

    if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
        DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
        return -EINVAL;
    }

    radeon_crtc->cursor_width = width;
    radeon_crtc->cursor_height = height;

    obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
    if (!obj) {
        DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
        return -EINVAL;
    }

    ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (ret)
        goto fail;

    radeon_lock_cursor(crtc, true);
    /* XXX only 27 bit offset for legacy cursor */
    radeon_set_cursor(crtc, obj, gpu_addr);
    radeon_show_cursor(crtc);
    radeon_lock_cursor(crtc, false);

unpin:
    if (radeon_crtc->cursor_bo) {
        radeon_gem_object_unpin(radeon_crtc->cursor_bo);
        mutex_lock(&crtc->dev->struct_mutex);
        drm_gem_object_unreference(radeon_crtc->cursor_bo);
        mutex_unlock(&crtc->dev->struct_mutex);
    }

    radeon_crtc->cursor_bo = obj;
    return 0;
fail:
    mutex_lock(&crtc->dev->struct_mutex);
    drm_gem_object_unreference(obj);
    mutex_unlock(&crtc->dev->struct_mutex);

    return ret;
}

int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                            int x, int y)
{
    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
    struct radeon_device *rdev = crtc->dev->dev_private;
    int xorigin = 0, yorigin = 0;

    if (x < 0)
        xorigin = -x + 1;
    if (y < 0)
        yorigin = -y + 1;
    if (xorigin >= CURSOR_WIDTH)
        xorigin = CURSOR_WIDTH - 1;
    if (yorigin >= CURSOR_HEIGHT)
        yorigin = CURSOR_HEIGHT - 1;

    radeon_lock_cursor(crtc, true);
    if (ASIC_IS_AVIVO(rdev)) {
        int w = radeon_crtc->cursor_width;
        int i = 0;
        struct drm_crtc *crtc_p;

        /* avivo cursors are offset into the total surface */
        x += crtc->x;
        y += crtc->y;
        DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

        /* avivo cursor images can't end on a 128-pixel boundary or
         * go past the end of the frame if both crtcs are enabled
         */
        list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
            if (crtc_p->enabled)
                i++;
        }
        if (i > 1) {
            int cursor_end, frame_end;

            cursor_end = x - xorigin + w;
            frame_end = crtc->x + crtc->mode.crtc_hdisplay;
            if (cursor_end >= frame_end) {
                w = w - (cursor_end - frame_end);
                if (!(frame_end & 0x7f))
                    w--;
            } else {
                if (!(cursor_end & 0x7f))
                    w--;
            }
            if (w <= 0)
                w = 1;
        }

        WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
               ((xorigin ? 0 : x) << 16) |
               (yorigin ? 0 : y));
        WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
        WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
               ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
    } else {
        if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
            y *= 2;

        WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
               (RADEON_CUR_LOCK
                | (xorigin << 16)
                | yorigin));
        WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
               (RADEON_CUR_LOCK
                | ((xorigin ? 0 : x) << 16)
                | (yorigin ? 0 : y)));
    }
    radeon_lock_cursor(crtc, false);

    return 0;
}
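Note: a worked example of the hot-spot clamping in radeon_crtc_cursor_move() above, with illustrative numbers only:

/* For a cursor dragged partly off the left edge at x = -10, y = 5:
 * xorigin = -x + 1 = 11 and yorigin = 0, so the position register is
 * written with (0, 5) and the hot-spot register with (11, 0).  The
 * hardware never sees a negative coordinate; the cursor image is
 * shifted into its own bitmap instead. */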
@ -0,0 +1,813 @@
|
|||
/*
|
||||
* Copyright 2008 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
* Copyright 2009 Jerome Glisse.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
* Jerome Glisse
|
||||
*/
|
||||
#include <linux/console.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/radeon_drm.h>
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "atom.h"
|
||||
|
||||
/*
|
||||
* GPU scratch registers helpers function.
|
||||
*/
|
||||
static void radeon_scratch_init(struct radeon_device *rdev)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* FIXME: check this out */
|
||||
if (rdev->family < CHIP_R300) {
|
||||
rdev->scratch.num_reg = 5;
|
||||
} else {
|
||||
rdev->scratch.num_reg = 7;
|
||||
}
|
||||
for (i = 0; i < rdev->scratch.num_reg; i++) {
|
||||
rdev->scratch.free[i] = true;
|
||||
rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
|
||||
}
|
||||
}
|
||||
|
||||
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < rdev->scratch.num_reg; i++) {
|
||||
if (rdev->scratch.free[i]) {
|
||||
rdev->scratch.free[i] = false;
|
||||
*reg = rdev->scratch.reg[i];
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < rdev->scratch.num_reg; i++) {
|
||||
if (rdev->scratch.reg[i] == reg) {
|
||||
rdev->scratch.free[i] = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
/*
 * MC common functions
 */
int radeon_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Some chips have an "issue" with the memory controller, the
	 * location must be aligned to the size. We just align it down,
	 * too bad if we walk over the top of system memory, we don't
	 * use DMA without a remapper anyway.
	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
	 */
	/* FGLRX seems to set it up like this: VRAM at 0, then GART.
	 */
	/*
	 * Note: from R6xx the address space is 40 bits, but here we only
	 * use 32 bits (we have yet to see a card that would exhaust the
	 * 4G address space).
	 */
	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
		/* vram location was already set up, try to put gtt after
		 * it if it fits */
		tmp = rdev->mc.vram_location + rdev->mc.vram_size;
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			rdev->mc.gtt_location = tmp;
		} else {
			if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
				printk(KERN_ERR "[drm] GTT too big to fit "
				       "before or after vram location.\n");
				return -EINVAL;
			}
			rdev->mc.gtt_location = 0;
		}
	} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
		/* gtt location was already set up, try to put vram before
		 * it if it fits */
		if (rdev->mc.vram_size < rdev->mc.gtt_location) {
			rdev->mc.vram_location = 0;
		} else {
			tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
			tmp += (rdev->mc.vram_size - 1);
			tmp &= ~(rdev->mc.vram_size - 1);
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) {
				rdev->mc.vram_location = tmp;
			} else {
				printk(KERN_ERR "[drm] vram too big to fit "
				       "before or after GTT location.\n");
				return -EINVAL;
			}
		}
	} else {
		rdev->mc.vram_location = 0;
		rdev->mc.gtt_location = rdev->mc.vram_size;
	}
	DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20);
	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
		 rdev->mc.vram_location,
		 rdev->mc.vram_location + rdev->mc.vram_size - 1);
	DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
		 rdev->mc.gtt_location,
		 rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
	return 0;
}
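The placement math above relies on gtt_size and vram_size being powers of two: rounding an address up to the next multiple of a power-of-two size is the usual add-then-mask trick. A standalone sketch of the identity being used (the addresses and sizes are made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t vram_end = 0x08000000 + 0x123456; /* base + size, unaligned */
		uint32_t gtt_size = 0x04000000;            /* must be a power of two */
		/* round up: add size-1, then clear the low bits */
		uint32_t gtt_base = (vram_end + gtt_size - 1) & ~(gtt_size - 1);

		printf("GTT placed at 0x%08X\n", gtt_base); /* prints 0x0C000000 */
		return 0;
	}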
/*
 * GPU helper functions.
 */
static bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}
/*
 * Register accessor functions.
 */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG_ON(1);
	return 0;
}

void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG_ON(1);
}

void radeon_register_accessor_init(struct radeon_device *rdev)
{
	rdev->mm_rreg = &r100_mm_rreg;
	rdev->mm_wreg = &r100_mm_wreg;
	rdev->mc_rreg = &radeon_invalid_rreg;
	rdev->mc_wreg = &radeon_invalid_wreg;
	rdev->pll_rreg = &radeon_invalid_rreg;
	rdev->pll_wreg = &radeon_invalid_wreg;
	rdev->pcie_rreg = &radeon_invalid_rreg;
	rdev->pcie_wreg = &radeon_invalid_wreg;
	rdev->pciep_rreg = &radeon_invalid_rreg;
	rdev->pciep_wreg = &radeon_invalid_wreg;

	/* Don't change the order, as we are overriding accessors. */
	if (rdev->family < CHIP_RV515) {
		rdev->pcie_rreg = &rv370_pcie_rreg;
		rdev->pcie_wreg = &rv370_pcie_wreg;
	}
	if (rdev->family >= CHIP_RV515) {
		rdev->pcie_rreg = &rv515_pcie_rreg;
		rdev->pcie_wreg = &rv515_pcie_wreg;
	}
	/* FIXME: not sure here */
	if (rdev->family <= CHIP_R580) {
		rdev->pll_rreg = &r100_pll_rreg;
		rdev->pll_wreg = &r100_pll_wreg;
	}
	if (rdev->family >= CHIP_RV515) {
		rdev->mc_rreg = &rv515_mc_rreg;
		rdev->mc_wreg = &rv515_mc_wreg;
	}
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
		rdev->mc_rreg = &rs400_mc_rreg;
		rdev->mc_wreg = &rs400_mc_wreg;
	}
	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		rdev->mc_rreg = &rs690_mc_rreg;
		rdev->mc_wreg = &rs690_mc_wreg;
	}
	if (rdev->family == CHIP_RS600) {
		rdev->mc_rreg = &rs600_mc_rreg;
		rdev->mc_wreg = &rs600_mc_wreg;
	}
	if (rdev->family >= CHIP_R600) {
		rdev->pciep_rreg = &r600_pciep_rreg;
		rdev->pciep_wreg = &r600_pciep_wreg;
	}
}
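The point of the table above is that callers never branch on the chip family at the call site; every access goes through the installed function pointer. A hedged kernel-context sketch of a read through the PCIE accessor (the function name and register offset are placeholders, not from this diff):

	/* Routes through whichever rv370/rv515 helper was installed above. */
	static uint32_t radeon_pcie_read_example(struct radeon_device *rdev)
	{
		const uint32_t pcie_reg = 0x0;	/* hypothetical offset */

		return rdev->pcie_rreg(rdev, pcie_reg);
	}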
/*
 * ASIC
 */
int radeon_asic_init(struct radeon_device *rdev)
{
	radeon_register_accessor_init(rdev);
	switch (rdev->family) {
	case CHIP_R100:
	case CHIP_RV100:
	case CHIP_RS100:
	case CHIP_RV200:
	case CHIP_RS200:
	case CHIP_R200:
	case CHIP_RV250:
	case CHIP_RS300:
	case CHIP_RV280:
		rdev->asic = &r100_asic;
		break;
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_RV350:
	case CHIP_RV380:
		rdev->asic = &r300_asic;
		break;
	case CHIP_R420:
	case CHIP_R423:
	case CHIP_RV410:
		rdev->asic = &r420_asic;
		break;
	case CHIP_RS400:
	case CHIP_RS480:
		rdev->asic = &rs400_asic;
		break;
	case CHIP_RS600:
		rdev->asic = &rs600_asic;
		break;
	case CHIP_RS690:
	case CHIP_RS740:
		rdev->asic = &rs690_asic;
		break;
	case CHIP_RV515:
		rdev->asic = &rv515_asic;
		break;
	case CHIP_R520:
	case CHIP_RV530:
	case CHIP_RV560:
	case CHIP_RV570:
	case CHIP_R580:
		rdev->asic = &r520_asic;
		break;
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}
	return 0;
}
/*
 * Wrappers around modesetting bits.
 */
int radeon_clocks_init(struct radeon_device *rdev)
{
	int r;

	radeon_get_clock_info(rdev->ddev);
	r = radeon_static_clocks_init(rdev->ddev);
	if (r) {
		return r;
	}
	DRM_INFO("Clocks initialized!\n");
	return 0;
}

void radeon_clocks_fini(struct radeon_device *rdev)
{
}

/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg * 4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg * 4);
	return r;
}

static struct card_info atom_card_info = {
	.dev = NULL,
	.reg_read = cail_reg_read,
	.reg_write = cail_reg_write,
	.mc_read = cail_mc_read,
	.mc_write = cail_mc_write,
	.pll_read = cail_pll_read,
	.pll_write = cail_pll_write,
};

int radeon_atombios_init(struct radeon_device *rdev)
{
	atom_card_info.dev = rdev->ddev;
	rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	kfree(rdev->mode_info.atom_context);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

int radeon_modeset_init(struct radeon_device *rdev);
void radeon_modeset_fini(struct radeon_device *rdev);
/*
 * Radeon device.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, ret;

	DRM_INFO("radeon: Initializing kernel modesetting.\n");
	rdev->shutdown = false;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	/* mutex initializations are all done here so we
	 * can recall functions without having locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	rwlock_init(&rdev->fence_drv.lock);

	if (radeon_agpmode == -1) {
		rdev->flags &= ~RADEON_IS_AGP;
		if (rdev->family > CHIP_RV515 ||
		    rdev->family == CHIP_RV380 ||
		    rdev->family == CHIP_RV410 ||
		    rdev->family == CHIP_R423) {
			DRM_INFO("Forcing AGP to PCIE mode\n");
			rdev->flags |= RADEON_IS_PCIE;
		} else {
			DRM_INFO("Forcing AGP to PCI mode\n");
			rdev->flags |= RADEON_IS_PCI;
		}
	}

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r) {
		return r;
	}

	/* Report DMA addressing limitation */
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
	rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* Setup errata flags */
	radeon_errata(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);

	/* TODO: disable VGA, need to use the VGA request interface */
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r) {
			return r;
		}
	} else {
		r = radeon_combios_init(rdev);
		if (r) {
			return r;
		}
	}
	/* Reset the gpu before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_gpu_reset(rdev)) {
		/* FIXME: what do we want to do here ? */
	}
	/* check if cards are posted or not */
	if (!radeon_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios) {
			atom_asic_init(rdev->mode_info.atom_context);
		} else {
			radeon_combios_asic_init(rdev->ddev);
		}
	}
	/* Get vram information */
	radeon_vram_info(rdev);
	/* Device is severely broken if aper size > vram size.
	 * for RN50/M6/M7 - Novell bug 204882?
	 */
	if (rdev->mc.vram_size < rdev->mc.aper_size) {
		rdev->mc.aper_size = rdev->mc.vram_size;
	}
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
				      MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
		 rdev->mc.vram_size >> 20,
		 (unsigned)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}
	/* Initialize memory controller (also test AGP) */
	r = radeon_mc_init(rdev);
	if (r) {
		return r;
	}
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r) {
		return r;
	}
	r = radeon_irq_kms_init(rdev);
	if (r) {
		return r;
	}
	/* Memory manager */
	r = radeon_object_init(rdev);
	if (r) {
		return r;
	}
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = radeon_gart_enable(rdev);
	if (!r) {
		r = radeon_gem_init(rdev);
	}

	/* 1M ring buffer */
	if (!r) {
		r = radeon_cp_init(rdev, 1024 * 1024);
	}
	if (!r) {
		r = radeon_wb_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing WB (%d).\n", r);
			return r;
		}
	}
	if (!r) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			return r;
		}
	}
	if (!r) {
		r = radeon_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			return r;
		}
	}
	ret = r;
	r = radeon_modeset_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
		rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
	}
	if (!ret) {
		DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return ret;
}
void radeon_device_fini(struct radeon_device *rdev)
{
	if (rdev == NULL || rdev->rmmio == NULL) {
		return;
	}
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* Order matters, so be careful if you rearrange anything */
	radeon_modeset_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_object_fini(rdev);
	/* mc_fini must be after object_fini */
	radeon_mc_fini(rdev);
#if __OS_HAS_AGP
	radeon_agp_fini(rdev);
#endif
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	if (rdev->is_atom_bios) {
		radeon_atombios_fini(rdev);
	} else {
		radeon_combios_fini(rdev);
	}
	kfree(rdev->bios);
	rdev->bios = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}
/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;

	if (dev == NULL || rdev == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_object *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		if (robj != rdev->fbdev_robj) {
			radeon_object_unpin(robj);
		}
	}
	/* evict vram memory */
	radeon_object_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_cp_disable(rdev);
	radeon_gart_disable(rdev);

	/* evict remaining vram memory */
	radeon_object_evict_vram(rdev);

	rdev->irq.sw_int = false;
	radeon_irq_set(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	acquire_console_sem();
	fb_set_suspend(rdev->fbdev_info, 1);
	release_console_sem();
	return 0;
}
int radeon_resume_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	int r;

	acquire_console_sem();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		release_console_sem();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* Reset the gpu before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_gpu_reset(rdev)) {
		/* FIXME: what do we want to do here ? */
	}
	/* post card */
	if (rdev->is_atom_bios) {
		atom_asic_init(rdev->mode_info.atom_context);
	} else {
		radeon_combios_asic_init(rdev->ddev);
	}
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		release_console_sem();
		return r;
	}
	/* Enable IRQ */
	rdev->irq.sw_int = true;
	radeon_irq_set(rdev);
	/* Initialize GPU Memory Controller */
	r = radeon_mc_init(rdev);
	if (r) {
		goto out;
	}
	r = radeon_gart_enable(rdev);
	if (r) {
		goto out;
	}
	r = radeon_cp_init(rdev, rdev->cp.ring_size);
	if (r) {
		goto out;
	}
out:
	fb_set_suspend(rdev->fbdev_info, 0);
	release_console_sem();

	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	return 0;
}
/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report this so we can increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif
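For reference, a sub-module would hook into the table above with a static drm_info_list. A hedged kernel-context sketch (the show callback, file name, and call site are illustrative assumptions, not from this diff; seq_file comes from the DRM debugfs machinery):

	static int example_info_show(struct seq_file *m, void *data)
	{
		seq_printf(m, "example\n");	/* whatever state is worth dumping */
		return 0;
	}

	static struct drm_info_list example_list[] = {
		{"radeon_example", example_info_show, 0, NULL},
	};

	/* from that sub-module's init path:
	 * radeon_debugfs_add_files(rdev, example_list, ARRAY_SIZE(example_list));
	 */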
@@ -0,0 +1,692 @@
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "atom.h"
#include <asm/div64.h>

#include "drm_crtc_helper.h"
#include "drm_edid.h"

static int radeon_ddc_dump(struct drm_connector *connector);

static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(AVIVO_DC_LUT_30_COLOR,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}

	WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}

static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;
	uint32_t dac2_cntl;

	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_PALETTE_30_DATA,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}

void radeon_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (!crtc->enabled)
		return;

	if (ASIC_IS_AVIVO(rdev))
		avivo_crtc_load_lut(crtc);
	else
		legacy_crtc_load_lut(crtc);
}
/** Sets the color ramps on behalf of RandR */
void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			      u16 blue, int regno)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	if (regno == 0)
		DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
	radeon_crtc->lut_r[regno] = red >> 6;
	radeon_crtc->lut_g[regno] = green >> 6;
	radeon_crtc->lut_b[regno] = blue >> 6;
}

static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				  u16 *blue, uint32_t size)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int i, j;

	if (size != 256) {
		return;
	}
	if (crtc->fb == NULL) {
		return;
	}

	if (crtc->fb->depth == 16) {
		for (i = 0; i < 64; i++) {
			if (i <= 31) {
				for (j = 0; j < 8; j++) {
					radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6;
					radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
				}
			}
			for (j = 0; j < 4; j++)
				radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
		}
	} else {
		for (i = 0; i < 256; i++) {
			radeon_crtc->lut_r[i] = red[i] >> 6;
			radeon_crtc->lut_g[i] = green[i] >> 6;
			radeon_crtc->lut_b[i] = blue[i] >> 6;
		}
	}

	radeon_crtc_load_lut(crtc);
}
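The conversion and packing used above is the same on both the AVIVO and legacy paths: fbdev hands the driver 16-bit color components, the hardware palette wants 10 bits each in one 30-bit word. A standalone sketch of one palette entry, for clarity:

	#include <stdint.h>

	/* Pack one LUT entry the way the WREG32 loops above do. */
	static uint32_t lut_word(uint16_t red, uint16_t green, uint16_t blue)
	{
		uint32_t r10 = red >> 6;	/* 16-bit -> 10-bit component */
		uint32_t g10 = green >> 6;
		uint32_t b10 = blue >> 6;

		return (r10 << 20) | (g10 << 10) | b10;
	}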
static void radeon_crtc_destroy(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	if (radeon_crtc->mode_set.mode) {
		drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode);
	}
	drm_crtc_cleanup(crtc);
	kfree(radeon_crtc);
}

static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set = radeon_crtc_cursor_set,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = radeon_crtc_destroy,
};

static void radeon_crtc_init(struct drm_device *dev, int index)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc;
	int i;

	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (radeon_crtc == NULL)
		return;

	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
	radeon_crtc->crtc_id = index;

	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
	radeon_crtc->mode_set.num_connectors = 0;

	for (i = 0; i < 256; i++) {
		radeon_crtc->lut_r[i] = i << 2;
		radeon_crtc->lut_g[i] = i << 2;
		radeon_crtc->lut_b[i] = i << 2;
	}

	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
		radeon_atombios_init_crtc(dev, radeon_crtc);
	else
		radeon_legacy_init_crtc(dev, radeon_crtc);
}

static const char *encoder_names[34] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
};

static const char *connector_names[13] = {
	"Unknown",
	"VGA",
	"DVI-I",
	"DVI-D",
	"DVI-A",
	"Composite",
	"S-video",
	"LVDS",
	"Component",
	"DIN",
	"DisplayPort",
	"HDMI-A",
	"HDMI-B",
};

static void radeon_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("Radeon Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector_names[connector->connector_type]);
		if (radeon_connector->ddc_bus)
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 radeon_connector->ddc_bus->rec.mask_clk_reg,
				 radeon_connector->ddc_bus->rec.mask_data_reg,
				 radeon_connector->ddc_bus->rec.a_clk_reg,
				 radeon_connector->ddc_bus->rec.a_data_reg,
				 radeon_connector->ddc_bus->rec.put_clk_reg,
				 radeon_connector->ddc_bus->rec.put_data_reg,
				 radeon_connector->ddc_bus->rec.get_clk_reg,
				 radeon_connector->ddc_bus->rec.get_data_reg);
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			radeon_encoder = to_radeon_encoder(encoder);
			devices = radeon_encoder->devices & radeon_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
			}
		}
		i++;
	}
}

bool radeon_setup_enc_conn(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_connector *drm_connector;
	bool ret = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios) {
			if (rdev->family >= CHIP_R600)
				ret = radeon_get_atom_connector_info_from_object_table(dev);
			else
				ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
		} else
			ret = radeon_get_legacy_connector_info_from_bios(dev);
	} else {
		if (!ASIC_IS_AVIVO(rdev))
			ret = radeon_get_legacy_connector_info_from_table(dev);
	}
	if (ret) {
		radeon_print_display_setup(dev);
		list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
			radeon_ddc_dump(drm_connector);
	}

	return ret;
}

int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
	struct edid *edid;
	int ret = 0;

	if (!radeon_connector->ddc_bus)
		return -1;
	radeon_i2c_do_lock(radeon_connector, 1);
	edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
	radeon_i2c_do_lock(radeon_connector, 0);
	if (edid) {
		/* update digital bits here */
		if (edid->digital)
			radeon_connector->use_digital = 1;
		else
			radeon_connector->use_digital = 0;
		drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
		ret = drm_add_edid_modes(&radeon_connector->base, edid);
		kfree(edid);
		return ret;
	}
	drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
	return -1;
}

static int radeon_ddc_dump(struct drm_connector *connector)
{
	struct edid *edid;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	int ret = 0;

	if (!radeon_connector->ddc_bus)
		return -1;
	radeon_i2c_do_lock(radeon_connector, 1);
	edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
	radeon_i2c_do_lock(radeon_connector, 0);
	if (edid) {
		kfree(edid);
	}
	return ret;
}

static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
	uint64_t mod;

	n += d / 2;

	mod = do_div(n, d);
	return n;
}
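radeon_div() is a round-to-nearest 64-by-32 division; do_div() is used because the kernel cannot rely on libgcc's 64-bit division helpers on 32-bit targets. The same arithmetic in portable user-space C, for clarity (assuming d is non-zero):

	#include <stdint.h>

	static uint32_t div_round_nearest(uint64_t n, uint32_t d)
	{
		/* adding d/2 before truncating turns floor division into rounding */
		return (uint32_t)((n + d / 2) / d);
	}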
void radeon_compute_pll(struct radeon_pll *pll,
			uint64_t freq,
			uint32_t *dot_clock_p,
			uint32_t *fb_div_p,
			uint32_t *frac_fb_div_p,
			uint32_t *ref_div_p,
			uint32_t *post_div_p,
			int flags)
{
	uint32_t min_ref_div = pll->min_ref_div;
	uint32_t max_ref_div = pll->max_ref_div;
	uint32_t min_fractional_feed_div = 0;
	uint32_t max_fractional_feed_div = 0;
	uint32_t best_vco = pll->best_vco;
	uint32_t best_post_div = 1;
	uint32_t best_ref_div = 1;
	uint32_t best_feedback_div = 1;
	uint32_t best_frac_feedback_div = 0;
	uint32_t best_freq = -1;
	uint32_t best_error = 0xffffffff;
	uint32_t best_vco_diff = 1;
	uint32_t post_div;

	DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
	freq = freq * 1000;

	if (flags & RADEON_PLL_USE_REF_DIV)
		min_ref_div = max_ref_div = pll->reference_div;
	else {
		while (min_ref_div < max_ref_div - 1) {
			uint32_t mid = (min_ref_div + max_ref_div) / 2;
			uint32_t pll_in = pll->reference_freq / mid;
			if (pll_in < pll->pll_in_min)
				max_ref_div = mid;
			else if (pll_in > pll->pll_in_max)
				min_ref_div = mid;
			else
				break;
		}
	}

	if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		min_fractional_feed_div = pll->min_frac_feedback_div;
		max_fractional_feed_div = pll->max_frac_feedback_div;
	}

	for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
		uint32_t ref_div;

		if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
			continue;

		/* legacy radeons only have a few post_divs */
		if (flags & RADEON_PLL_LEGACY) {
			if ((post_div == 5) ||
			    (post_div == 7) ||
			    (post_div == 9) ||
			    (post_div == 10) ||
			    (post_div == 11) ||
			    (post_div == 13) ||
			    (post_div == 14) ||
			    (post_div == 15))
				continue;
		}

		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
			uint32_t feedback_div, current_freq = 0, error, vco_diff;
			uint32_t pll_in = pll->reference_freq / ref_div;
			uint32_t min_feed_div = pll->min_feedback_div;
			uint32_t max_feed_div = pll->max_feedback_div + 1;

			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
				continue;

			while (min_feed_div < max_feed_div) {
				uint32_t vco;
				uint32_t min_frac_feed_div = min_fractional_feed_div;
				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
				uint32_t frac_feedback_div;
				uint64_t tmp;

				feedback_div = (min_feed_div + max_feed_div) / 2;

				tmp = (uint64_t)pll->reference_freq * feedback_div;
				vco = radeon_div(tmp, ref_div);

				if (vco < pll->pll_out_min) {
					min_feed_div = feedback_div + 1;
					continue;
				} else if (vco > pll->pll_out_max) {
					max_feed_div = feedback_div;
					continue;
				}

				while (min_frac_feed_div < max_frac_feed_div) {
					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
					current_freq = radeon_div(tmp, ref_div * post_div);

					error = abs(current_freq - freq);
					vco_diff = abs(vco - best_vco);

					if ((best_vco == 0 && error < best_error) ||
					    (best_vco != 0 &&
					     (error < best_error - 100 ||
					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
						best_post_div = post_div;
						best_ref_div = ref_div;
						best_feedback_div = feedback_div;
						best_frac_feedback_div = frac_feedback_div;
						best_freq = current_freq;
						best_error = error;
						best_vco_diff = vco_diff;
					} else if (current_freq == freq) {
						if (best_freq == -1) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						} else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
							   ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
							   ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
							   ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
							   ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
							   ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						}
					}
					if (current_freq < freq)
						min_frac_feed_div = frac_feedback_div + 1;
					else
						max_frac_feed_div = frac_feedback_div;
				}
				if (current_freq < freq)
					min_feed_div = feedback_div + 1;
				else
					max_feed_div = feedback_div;
			}
		}
	}

	*dot_clock_p = best_freq / 10000;
	*fb_div_p = best_feedback_div;
	*frac_fb_div_p = best_frac_feedback_div;
	*ref_div_p = best_ref_div;
	*post_div_p = best_post_div;
}
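Stripped of the search loops, the quantity being matched above is the classic PLL output equation, with the fractional divider contributing tenths of a feedback unit. A self-contained check of one operating point, assuming the reference clock is expressed in the driver's 10 kHz units; all divider values here are illustrative, not taken from this patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ref_freq = 2700;	/* 27 MHz reference, in 10 kHz units */
		uint32_t fb_div = 40, frac_fb_div = 5, ref_div = 6, post_div = 2;

		/* same scaling as radeon_compute_pll: whole + tenths of a fb unit */
		uint64_t tmp = ref_freq * 10000 * fb_div
			     + ref_freq * 1000 * frac_fb_div;
		uint64_t out = tmp / (ref_div * post_div);

		/* same 10 kHz units as *dot_clock_p: prints 9112, i.e. ~91.1 MHz */
		printf("dot clock = %llu\n", (unsigned long long)(out / 10000));
		return 0;
	}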
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
	struct drm_device *dev = fb->dev;

	if (fb->fbdev)
		radeonfb_remove(dev, fb);

	if (radeon_fb->obj) {
		radeon_gem_object_unpin(radeon_fb->obj);
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(radeon_fb->obj);
		mutex_unlock(&dev->struct_mutex);
	}
	drm_framebuffer_cleanup(fb);
	kfree(radeon_fb);
}

static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
}

static const struct drm_framebuffer_funcs radeon_fb_funcs = {
	.destroy = radeon_user_framebuffer_destroy,
	.create_handle = radeon_user_framebuffer_create_handle,
};

struct drm_framebuffer *
radeon_framebuffer_create(struct drm_device *dev,
			  struct drm_mode_fb_cmd *mode_cmd,
			  struct drm_gem_object *obj)
{
	struct radeon_framebuffer *radeon_fb;

	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
	if (radeon_fb == NULL) {
		return NULL;
	}
	drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
	drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
	radeon_fb->obj = obj;
	return &radeon_fb->base;
}

static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       struct drm_mode_fb_cmd *mode_cmd)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);

	return radeon_framebuffer_create(dev, mode_cmd, obj);
}

static const struct drm_mode_config_funcs radeon_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
	.fb_changed = radeonfb_probe,
};

int radeon_modeset_init(struct radeon_device *rdev)
{
	int num_crtc = 2, i;
	int ret;

	drm_mode_config_init(rdev->ddev);
	rdev->mode_info.mode_config_initialized = true;

	rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;

	if (ASIC_IS_AVIVO(rdev)) {
		rdev->ddev->mode_config.max_width = 8192;
		rdev->ddev->mode_config.max_height = 8192;
	} else {
		rdev->ddev->mode_config.max_width = 4096;
		rdev->ddev->mode_config.max_height = 4096;
	}

	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;

	/* allocate crtcs - TODO single crtc */
	for (i = 0; i < num_crtc; i++) {
		radeon_crtc_init(rdev->ddev, i);
	}

	/* okay we should have all the bios connectors */
	ret = radeon_setup_enc_conn(rdev->ddev);
	if (!ret) {
		return ret;
	}
	drm_helper_initial_config(rdev->ddev);
	return 0;
}

void radeon_modeset_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.mode_config_initialized) {
		drm_mode_config_cleanup(rdev->ddev);
		rdev->mode_info.mode_config_initialized = false;
	}
}

void radeon_init_disp_bandwidth(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_display_mode *modes[2];
	int pixel_bytes[2];
	struct drm_crtc *crtc;

	pixel_bytes[0] = pixel_bytes[1] = 0;
	modes[0] = modes[1] = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (crtc->enabled && crtc->fb) {
			modes[radeon_crtc->crtc_id] = &crtc->mode;
			pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8;
		}
	}

	if (ASIC_IS_AVIVO(rdev)) {
		radeon_init_disp_bw_avivo(dev,
					  modes[0],
					  pixel_bytes[0],
					  modes[1],
					  pixel_bytes[1]);
	} else {
		radeon_init_disp_bw_legacy(dev,
					   modes[0],
					   pixel_bytes[0],
					   modes[1],
					   pixel_bytes[1]);
	}
}
@@ -35,12 +35,92 @@
#include "radeon_drv.h"

#include "drm_pciids.h"
#include <linux/console.h>


#if defined(CONFIG_DRM_RADEON_KMS)
/*
 * KMS wrapper.
 */
#define KMS_DRIVER_MAJOR	2
#define KMS_DRIVER_MINOR	0
#define KMS_DRIVER_PATCHLEVEL	0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
int radeon_driver_firstopen_kms(struct drm_device *dev);
void radeon_driver_lastclose_kms(struct drm_device *dev);
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
int radeon_resume_kms(struct drm_device *dev);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
void radeon_master_destroy_kms(struct drm_device *dev,
			       struct drm_master *master);
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
#endif
#endif


int radeon_no_wb;
#if defined(CONFIG_DRM_RADEON_KMS)
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_connector_table = 0;
#endif

MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);

#if defined(CONFIG_DRM_RADEON_KMS)
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, radeon_modeset, int, 0400);

MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
module_param_named(dynclks, radeon_dynclks, int, 0444);

MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);

MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
module_param_named(vramlimit, radeon_vram_limit, int, 0600);

MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
module_param_named(agpmode, radeon_agpmode, int, 0444);

MODULE_PARM_DESC(gartsize, "Size of the PCIE/IGP GART to set up, in megabytes (32, 64, etc.)");
module_param_named(gartsize, radeon_gart_size, int, 0600);

MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, radeon_benchmarking, int, 0444);

MODULE_PARM_DESC(connector_table, "Force connector table");
module_param_named(connector_table, radeon_connector_table, int, 0444);
#endif
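For reference, these knobs are set at module load time; something like modprobe radeon modeset=1 gartsize=64 agpmode=-1 would force the KMS path selected below, with a 64 MB GART and the PCI(E) GART used instead of AGP. The specific values here are illustrative, not recommendations from this patch.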
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

@@ -73,7 +153,11 @@ static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
};

static struct drm_driver driver = {
#if defined(CONFIG_DRM_RADEON_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

static struct drm_driver driver_old = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,

@@ -127,18 +211,141 @@ static struct drm_driver driver = {
	.patchlevel = DRIVER_PATCHLEVEL,
};

#if defined(CONFIG_DRM_RADEON_KMS)
static struct drm_driver kms_driver;

static int __devinit
radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &kms_driver);
}

static void
radeon_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int
radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	return radeon_suspend_kms(dev, state);
}

static int
radeon_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	return radeon_resume_kms(dev);
}

static struct drm_driver kms_driver = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = radeon_driver_load_kms,
	.firstopen = radeon_driver_firstopen_kms,
	.open = radeon_driver_open_kms,
	.preclose = radeon_driver_preclose_kms,
	.postclose = radeon_driver_postclose_kms,
	.lastclose = radeon_driver_lastclose_kms,
	.unload = radeon_driver_unload_kms,
	.suspend = radeon_suspend_kms,
	.resume = radeon_resume_kms,
	.get_vblank_counter = radeon_get_vblank_counter_kms,
	.enable_vblank = radeon_enable_vblank_kms,
	.disable_vblank = radeon_disable_vblank_kms,
	.master_create = radeon_master_create_kms,
	.master_destroy = radeon_master_destroy_kms,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = radeon_debugfs_init,
	.debugfs_cleanup = radeon_debugfs_cleanup,
#endif
	.irq_preinstall = radeon_driver_irq_preinstall_kms,
	.irq_postinstall = radeon_driver_irq_postinstall_kms,
	.irq_uninstall = radeon_driver_irq_uninstall_kms,
	.irq_handler = radeon_driver_irq_handler_kms,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = radeon_ioctls_kms,
	.gem_init_object = radeon_gem_object_init,
	.gem_free_object = radeon_gem_object_free,
	.dma_ioctl = radeon_dma_ioctl_kms,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = radeon_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
		 .compat_ioctl = NULL,
#endif
	},

	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
		 .probe = radeon_pci_probe,
		 .remove = radeon_pci_remove,
		 .suspend = radeon_pci_suspend,
		 .resume = radeon_pci_resume,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = KMS_DRIVER_MAJOR,
	.minor = KMS_DRIVER_MINOR,
	.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
#endif

static struct drm_driver *driver;

static int __init radeon_init(void)
{
	driver.num_ioctls = radeon_max_ioctl;
	return drm_init(&driver);
	driver = &driver_old;
	driver->num_ioctls = radeon_max_ioctl;
#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86)
	/* if enabled by default */
	if (radeon_modeset == -1) {
		DRM_INFO("radeon defaulting to kernel modesetting.\n");
		radeon_modeset = 1;
	}
	if (radeon_modeset == 1) {
		DRM_INFO("radeon kernel modesetting enabled.\n");
		driver = &kms_driver;
		driver->driver_features |= DRIVER_MODESET;
		driver->num_ioctls = radeon_max_kms_ioctl;
	}

	/* if the vga console setting is enabled, still
	 * let modprobe override it */
#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && radeon_modeset == -1) {
		DRM_INFO("VGACON disables radeon kernel modesetting.\n");
		driver = &driver_old;
		driver->driver_features &= ~DRIVER_MODESET;
		radeon_modeset = 0;
	}
#endif
#endif
	return drm_init(driver);
}

static void __exit radeon_exit(void)
{
	drm_exit(&driver);
	drm_exit(driver);
}

module_init(radeon_init);
late_initcall(radeon_init);
module_exit(radeon_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
File diff suppressed because it is too large
@@ -0,0 +1,825 @@
/*
|
||||
* Copyright © 2007 David Airlie
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* David Airlie
|
||||
*/
|
||||
/*
|
||||
* Modularization
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/fb.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "drm_crtc.h"
|
||||
#include "drm_crtc_helper.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon.h"
|
||||
|
||||
struct radeon_fb_device {
|
||||
struct radeon_device *rdev;
|
||||
struct drm_display_mode *mode;
|
||||
struct radeon_framebuffer *rfb;
|
||||
int crtc_count;
|
||||
/* crtc currently bound to this */
|
||||
uint32_t crtc_ids[2];
|
||||
};
|
||||
|
||||
static int radeonfb_setcolreg(unsigned regno,
|
||||
unsigned red,
|
||||
unsigned green,
|
||||
unsigned blue,
|
||||
unsigned transp,
|
||||
struct fb_info *info)
|
||||
{
|
||||
struct radeon_fb_device *rfbdev = info->par;
|
||||
struct drm_device *dev = rfbdev->rdev->ddev;
|
||||
struct drm_crtc *crtc;
|
||||
int i;
|
||||
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_mode_set *modeset = &radeon_crtc->mode_set;
|
||||
struct drm_framebuffer *fb = modeset->fb;
|
||||
|
||||
for (i = 0; i < rfbdev->crtc_count; i++) {
|
||||
if (crtc->base.id == rfbdev->crtc_ids[i]) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i == rfbdev->crtc_count) {
|
||||
continue;
|
||||
}
|
||||
if (regno > 255) {
|
||||
return 1;
|
||||
}
|
||||
if (fb->depth == 8) {
|
||||
radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (regno < 16) {
|
||||
switch (fb->depth) {
|
||||
case 15:
|
||||
fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
|
||||
((green & 0xf800) >> 6) |
|
||||
((blue & 0xf800) >> 11);
|
||||
break;
|
||||
case 16:
|
||||
fb->pseudo_palette[regno] = (red & 0xf800) |
|
||||
((green & 0xfc00) >> 5) |
|
||||
((blue & 0xf800) >> 11);
|
||||
break;
|
||||
case 24:
|
||||
case 32:
|
||||
fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
|
||||
(green & 0xff00) |
|
||||
((blue & 0xff00) >> 8);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||

static int radeonfb_check_var(struct fb_var_screeninfo *var,
                              struct fb_info *info)
{
    struct radeon_fb_device *rfbdev = info->par;
    struct radeon_framebuffer *rfb = rfbdev->rfb;
    struct drm_framebuffer *fb = &rfb->base;
    int depth;

    if (var->pixclock == -1 || !var->pixclock) {
        return -EINVAL;
    }
    /* Need to resize the fb object !!! */
    if (var->xres > fb->width || var->yres > fb->height) {
        DRM_ERROR("Requested width/height is greater than current fb "
                  "object %dx%d > %dx%d\n", var->xres, var->yres,
                  fb->width, fb->height);
        DRM_ERROR("Need resizing code.\n");
        return -EINVAL;
    }

    switch (var->bits_per_pixel) {
    case 16:
        depth = (var->green.length == 6) ? 16 : 15;
        break;
    case 32:
        depth = (var->transp.length > 0) ? 32 : 24;
        break;
    default:
        depth = var->bits_per_pixel;
        break;
    }

    switch (depth) {
    case 8:
        var->red.offset = 0;
        var->green.offset = 0;
        var->blue.offset = 0;
        var->red.length = 8;
        var->green.length = 8;
        var->blue.length = 8;
        var->transp.length = 0;
        var->transp.offset = 0;
        break;
    case 15:
        var->red.offset = 10;
        var->green.offset = 5;
        var->blue.offset = 0;
        var->red.length = 5;
        var->green.length = 5;
        var->blue.length = 5;
        var->transp.length = 1;
        var->transp.offset = 15;
        break;
    case 16:
        var->red.offset = 11;
        var->green.offset = 5;
        var->blue.offset = 0;
        var->red.length = 5;
        var->green.length = 6;
        var->blue.length = 5;
        var->transp.length = 0;
        var->transp.offset = 0;
        break;
    case 24:
        var->red.offset = 16;
        var->green.offset = 8;
        var->blue.offset = 0;
        var->red.length = 8;
        var->green.length = 8;
        var->blue.length = 8;
        var->transp.length = 0;
        var->transp.offset = 0;
        break;
    case 32:
        var->red.offset = 16;
        var->green.offset = 8;
        var->blue.offset = 0;
        var->red.length = 8;
        var->green.length = 8;
        var->blue.length = 8;
        var->transp.length = 8;
        var->transp.offset = 24;
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

/* this will let fbcon do the mode init */
static int radeonfb_set_par(struct fb_info *info)
{
    struct radeon_fb_device *rfbdev = info->par;
    struct drm_device *dev = rfbdev->rdev->ddev;
    struct fb_var_screeninfo *var = &info->var;
    struct drm_crtc *crtc;
    int ret;
    int i;

    if (var->pixclock != -1) {
        DRM_ERROR("PIXEL CLOCK SET\n");
        return -EINVAL;
    }

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

        for (i = 0; i < rfbdev->crtc_count; i++) {
            if (crtc->base.id == rfbdev->crtc_ids[i]) {
                break;
            }
        }
        if (i == rfbdev->crtc_count) {
            continue;
        }
        if (crtc->fb == radeon_crtc->mode_set.fb) {
            mutex_lock(&dev->mode_config.mutex);
            ret = crtc->funcs->set_config(&radeon_crtc->mode_set);
            mutex_unlock(&dev->mode_config.mutex);
            if (ret) {
                return ret;
            }
        }
    }
    return 0;
}

static int radeonfb_pan_display(struct fb_var_screeninfo *var,
                                struct fb_info *info)
{
    struct radeon_fb_device *rfbdev = info->par;
    struct drm_device *dev = rfbdev->rdev->ddev;
    struct drm_mode_set *modeset;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;
    int ret = 0;
    int i;

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        for (i = 0; i < rfbdev->crtc_count; i++) {
            if (crtc->base.id == rfbdev->crtc_ids[i]) {
                break;
            }
        }

        if (i == rfbdev->crtc_count) {
            continue;
        }

        radeon_crtc = to_radeon_crtc(crtc);
        modeset = &radeon_crtc->mode_set;

        modeset->x = var->xoffset;
        modeset->y = var->yoffset;

        if (modeset->num_connectors) {
            mutex_lock(&dev->mode_config.mutex);
            ret = crtc->funcs->set_config(modeset);
            mutex_unlock(&dev->mode_config.mutex);
            if (!ret) {
                info->var.xoffset = var->xoffset;
                info->var.yoffset = var->yoffset;
            }
        }
    }
    return ret;
}

static void radeonfb_on(struct fb_info *info)
{
    struct radeon_fb_device *rfbdev = info->par;
    struct drm_device *dev = rfbdev->rdev->ddev;
    struct drm_crtc *crtc;
    struct drm_encoder *encoder;
    int i;

    /*
     * For each CRTC in this fb, turn the CRTC on, then find
     * all associated encoders and turn them on as well.
     */
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

        for (i = 0; i < rfbdev->crtc_count; i++) {
            if (crtc->base.id == rfbdev->crtc_ids[i]) {
                break;
            }
        }

        mutex_lock(&dev->mode_config.mutex);
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
        mutex_unlock(&dev->mode_config.mutex);

        /* Found a CRTC on this fb, now find encoders */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
            if (encoder->crtc == crtc) {
                struct drm_encoder_helper_funcs *encoder_funcs;

                encoder_funcs = encoder->helper_private;
                mutex_lock(&dev->mode_config.mutex);
                encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
                mutex_unlock(&dev->mode_config.mutex);
            }
        }
    }
}

static void radeonfb_off(struct fb_info *info, int dpms_mode)
{
    struct radeon_fb_device *rfbdev = info->par;
    struct drm_device *dev = rfbdev->rdev->ddev;
    struct drm_crtc *crtc;
    struct drm_encoder *encoder;
    int i;

    /*
     * For each CRTC in this fb, find all associated encoders
     * and turn them off, then turn off the CRTC.
     */
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

        for (i = 0; i < rfbdev->crtc_count; i++) {
            if (crtc->base.id == rfbdev->crtc_ids[i]) {
                break;
            }
        }

        /* Found a CRTC on this fb, now find encoders */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
            if (encoder->crtc == crtc) {
                struct drm_encoder_helper_funcs *encoder_funcs;

                encoder_funcs = encoder->helper_private;
                mutex_lock(&dev->mode_config.mutex);
                encoder_funcs->dpms(encoder, dpms_mode);
                mutex_unlock(&dev->mode_config.mutex);
            }
        }
        if (dpms_mode == DRM_MODE_DPMS_OFF) {
            mutex_lock(&dev->mode_config.mutex);
            crtc_funcs->dpms(crtc, dpms_mode);
            mutex_unlock(&dev->mode_config.mutex);
        }
    }
}

int radeonfb_blank(int blank, struct fb_info *info)
{
    switch (blank) {
    case FB_BLANK_UNBLANK:
        radeonfb_on(info);
        break;
    case FB_BLANK_NORMAL:
        radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
        break;
    case FB_BLANK_HSYNC_SUSPEND:
        radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
        break;
    case FB_BLANK_VSYNC_SUSPEND:
        radeonfb_off(info, DRM_MODE_DPMS_SUSPEND);
        break;
    case FB_BLANK_POWERDOWN:
        radeonfb_off(info, DRM_MODE_DPMS_OFF);
        break;
    }
    return 0;
}

static struct fb_ops radeonfb_ops = {
    .owner = THIS_MODULE,
    .fb_check_var = radeonfb_check_var,
    .fb_set_par = radeonfb_set_par,
    .fb_setcolreg = radeonfb_setcolreg,
    .fb_fillrect = cfb_fillrect,
    .fb_copyarea = cfb_copyarea,
    .fb_imageblit = cfb_imageblit,
    .fb_pan_display = radeonfb_pan_display,
    .fb_blank = radeonfb_blank,
};

/**
 * Currently it is assumed that the old framebuffer is reused.
 *
 * LOCKING
 * caller should hold the mode config lock.
 *
 */
int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
{
    struct fb_info *info;
    struct drm_framebuffer *fb;
    struct drm_display_mode *mode = crtc->desired_mode;

    fb = crtc->fb;
    if (fb == NULL) {
        return 1;
    }
    info = fb->fbdev;
    if (info == NULL) {
        return 1;
    }
    if (mode == NULL) {
        return 1;
    }
    info->var.xres = mode->hdisplay;
    info->var.right_margin = mode->hsync_start - mode->hdisplay;
    info->var.hsync_len = mode->hsync_end - mode->hsync_start;
    info->var.left_margin = mode->htotal - mode->hsync_end;
    info->var.yres = mode->vdisplay;
    info->var.lower_margin = mode->vsync_start - mode->vdisplay;
    info->var.vsync_len = mode->vsync_end - mode->vsync_start;
    info->var.upper_margin = mode->vtotal - mode->vsync_end;
    info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
    /* avoid overflow */
    info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;

    return 0;
}
EXPORT_SYMBOL(radeonfb_resize);

static struct drm_mode_set panic_mode;

int radeonfb_panic(struct notifier_block *n, unsigned long unused,
                   void *panic_str)
{
    DRM_ERROR("panic occurred, switching back to text console\n");
    drm_crtc_helper_set_config(&panic_mode);
    return 0;
}
EXPORT_SYMBOL(radeonfb_panic);

static struct notifier_block paniced = {
    .notifier_call = radeonfb_panic,
};

static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp)
{
    int aligned = width;
    int align_large = (ASIC_IS_AVIVO(rdev));
    int pitch_mask = 0;

    switch (bpp / 8) {
    case 1:
        pitch_mask = align_large ? 255 : 127;
        break;
    case 2:
        pitch_mask = align_large ? 127 : 31;
        break;
    case 3:
    case 4:
        pitch_mask = align_large ? 63 : 15;
        break;
    }

    aligned += pitch_mask;
    aligned &= ~pitch_mask;
    return aligned;
}
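
A quick worked example of the round-up-to-mask idiom used by radeon_align_pitch() above (a standalone sketch, not part of the driver): with a 32bpp surface on a pre-AVIVO chip the pitch mask is 15, so a 1366-pixel-wide mode is padded to the next multiple of 16 pixels.

/* Hypothetical standalone helper; align must be a power of two. */
#include <assert.h>

static int align_up(int value, int align)
{
    int mask = align - 1;

    assert((align & mask) == 0); /* power of two */
    return (value + mask) & ~mask;
}

/* align_up(1366, 16) == 1376: a 1366-wide 32bpp scanout on a pre-AVIVO
 * part gets 10 pixels of per-line padding. */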

int radeonfb_create(struct radeon_device *rdev,
                    uint32_t fb_width, uint32_t fb_height,
                    uint32_t surface_width, uint32_t surface_height,
                    struct radeon_framebuffer **rfb_p)
{
    struct fb_info *info;
    struct radeon_fb_device *rfbdev;
    struct drm_framebuffer *fb;
    struct radeon_framebuffer *rfb;
    struct drm_mode_fb_cmd mode_cmd;
    struct drm_gem_object *gobj = NULL;
    struct radeon_object *robj = NULL;
    struct device *device = &rdev->pdev->dev;
    int size, aligned_size, ret;
    void *fbptr = NULL;

    mode_cmd.width = surface_width;
    mode_cmd.height = surface_height;
    mode_cmd.bpp = 32;
    /* need to align pitch with crtc limits */
    mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8);
    mode_cmd.depth = 24;

    size = mode_cmd.pitch * mode_cmd.height;
    aligned_size = ALIGN(size, PAGE_SIZE);

    ret = radeon_gem_object_create(rdev, aligned_size, 0,
                                   RADEON_GEM_DOMAIN_VRAM,
                                   false, ttm_bo_type_kernel,
                                   false, &gobj);
    if (ret) {
        printk(KERN_ERR "failed to allocate framebuffer\n");
        ret = -ENOMEM;
        goto out;
    }
    robj = gobj->driver_private;

    mutex_lock(&rdev->ddev->struct_mutex);
    fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
    if (fb == NULL) {
        DRM_ERROR("failed to allocate fb.\n");
        ret = -ENOMEM;
        goto out_unref;
    }

    list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);

    rfb = to_radeon_framebuffer(fb);
    *rfb_p = rfb;
    rdev->fbdev_rfb = rfb;

    info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
    if (info == NULL) {
        ret = -ENOMEM;
        goto out_unref;
    }
    rfbdev = info->par;

    ret = radeon_object_kmap(robj, &fbptr);
    if (ret) {
        goto out_unref;
    }

    strcpy(info->fix.id, "radeondrmfb");
    info->fix.type = FB_TYPE_PACKED_PIXELS;
    info->fix.visual = FB_VISUAL_TRUECOLOR;
    info->fix.type_aux = 0;
    info->fix.xpanstep = 1; /* doing it in hw */
    info->fix.ypanstep = 1; /* doing it in hw */
    info->fix.ywrapstep = 0;
    info->fix.accel = FB_ACCEL_I830;
    info->flags = FBINFO_DEFAULT;
    info->fbops = &radeonfb_ops;
    info->fix.line_length = fb->pitch;
    info->fix.smem_start = (unsigned long)fbptr;
    info->fix.smem_len = size;
    info->screen_base = fbptr;
    info->screen_size = size;
    info->pseudo_palette = fb->pseudo_palette;
    info->var.xres_virtual = fb->width;
    info->var.yres_virtual = fb->height;
    info->var.bits_per_pixel = fb->bits_per_pixel;
    info->var.xoffset = 0;
    info->var.yoffset = 0;
    info->var.activate = FB_ACTIVATE_NOW;
    info->var.height = -1;
    info->var.width = -1;
    info->var.xres = fb_width;
    info->var.yres = fb_height;
    info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
    info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
    info->pixmap.size = 64*1024;
    info->pixmap.buf_align = 8;
    info->pixmap.access_align = 32;
    info->pixmap.flags = FB_PIXMAP_SYSTEM;
    info->pixmap.scan_align = 1;
    if (info->screen_base == NULL) {
        ret = -ENOSPC;
        goto out_unref;
    }
    DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
    DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
    DRM_INFO("size %lu\n", (unsigned long)size);
    DRM_INFO("fb depth is %d\n", fb->depth);
    DRM_INFO("   pitch is %d\n", fb->pitch);

    switch (fb->depth) {
    case 8:
        info->var.red.offset = 0;
        info->var.green.offset = 0;
        info->var.blue.offset = 0;
        info->var.red.length = 8; /* 8bit DAC */
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 0;
        info->var.transp.length = 0;
        break;
    case 15:
        info->var.red.offset = 10;
        info->var.green.offset = 5;
        info->var.blue.offset = 0;
        info->var.red.length = 5;
        info->var.green.length = 5;
        info->var.blue.length = 5;
        info->var.transp.offset = 15;
        info->var.transp.length = 1;
        break;
    case 16:
        info->var.red.offset = 11;
        info->var.green.offset = 5;
        info->var.blue.offset = 0;
        info->var.red.length = 5;
        info->var.green.length = 6;
        info->var.blue.length = 5;
        info->var.transp.offset = 0;
        break;
    case 24:
        info->var.red.offset = 16;
        info->var.green.offset = 8;
        info->var.blue.offset = 0;
        info->var.red.length = 8;
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 0;
        info->var.transp.length = 0;
        break;
    case 32:
        info->var.red.offset = 16;
        info->var.green.offset = 8;
        info->var.blue.offset = 0;
        info->var.red.length = 8;
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 24;
        info->var.transp.length = 8;
        break;
    default:
        break;
    }

    fb->fbdev = info;
    rfbdev->rfb = rfb;
    rfbdev->rdev = rdev;

    mutex_unlock(&rdev->ddev->struct_mutex);
    return 0;

out_unref:
    if (robj) {
        radeon_object_kunmap(robj);
    }
    if (ret) {
        list_del(&fb->filp_head);
        drm_gem_object_unreference(gobj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
    }
    drm_gem_object_unreference(gobj);
    mutex_unlock(&rdev->ddev->struct_mutex);
out:
    return ret;
}

static int radeonfb_single_fb_probe(struct radeon_device *rdev)
{
    struct drm_crtc *crtc;
    struct drm_connector *connector;
    unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
    unsigned int surface_width = 0, surface_height = 0;
    int new_fb = 0;
    int crtc_count = 0;
    int ret, i, conn_count = 0;
    struct radeon_framebuffer *rfb;
    struct fb_info *info;
    struct radeon_fb_device *rfbdev;
    struct drm_mode_set *modeset = NULL;

    /* first up get a count of crtcs now in use and new min/maxes width/heights */
    list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
        if (drm_helper_crtc_in_use(crtc)) {
            if (crtc->desired_mode) {
                if (crtc->desired_mode->hdisplay < fb_width)
                    fb_width = crtc->desired_mode->hdisplay;

                if (crtc->desired_mode->vdisplay < fb_height)
                    fb_height = crtc->desired_mode->vdisplay;

                if (crtc->desired_mode->hdisplay > surface_width)
                    surface_width = crtc->desired_mode->hdisplay;

                if (crtc->desired_mode->vdisplay > surface_height)
                    surface_height = crtc->desired_mode->vdisplay;
            }
            crtc_count++;
        }
    }

    if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
        /* hmm everyone went away - assume VGA cable just fell out
           and will come back later. */
        return 0;
    }

    /* do we have an fb already? */
    if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) {
        /* create an fb if we don't have one */
        ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb);
        if (ret) {
            return -EINVAL;
        }
        new_fb = 1;
    } else {
        struct drm_framebuffer *fb;
        fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head);
        rfb = to_radeon_framebuffer(fb);

        /* if someone hotplugs something bigger than we have already allocated, we are pwned.
           As really we can't resize an fbdev that is in the wild currently due to fbdev
           not really being designed for the lower layers moving stuff around under it.
           - so in the grand style of things - punt. */
        if ((fb->width < surface_width) || (fb->height < surface_height)) {
            DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
            return -EINVAL;
        }
    }

    info = rfb->base.fbdev;
    rdev->fbdev_info = info;
    rfbdev = info->par;

    crtc_count = 0;
    /* okay we need to setup new connector sets in the crtcs */
    list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        modeset = &radeon_crtc->mode_set;
        modeset->fb = &rfb->base;
        conn_count = 0;
        list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) {
            if (connector->encoder)
                if (connector->encoder->crtc == modeset->crtc) {
                    modeset->connectors[conn_count] = connector;
                    conn_count++;
                    if (conn_count > RADEONFB_CONN_LIMIT)
                        BUG();
                }
        }

        for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++)
            modeset->connectors[i] = NULL;

        rfbdev->crtc_ids[crtc_count++] = crtc->base.id;

        modeset->num_connectors = conn_count;
        if (modeset->crtc->desired_mode) {
            if (modeset->mode) {
                drm_mode_destroy(rdev->ddev, modeset->mode);
            }
            modeset->mode = drm_mode_duplicate(rdev->ddev,
                                               modeset->crtc->desired_mode);
        }
    }
    rfbdev->crtc_count = crtc_count;

    if (new_fb) {
        info->var.pixclock = -1;
        if (register_framebuffer(info) < 0)
            return -EINVAL;
    } else {
        radeonfb_set_par(info);
    }
    printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
           info->fix.id);

    /* Switch back to kernel console on panic */
    panic_mode = *modeset;
    atomic_notifier_chain_register(&panic_notifier_list, &paniced);
    printk(KERN_INFO "registered panic notifier\n");

    return 0;
}

int radeonfb_probe(struct drm_device *dev)
{
    int ret;

    /* something has changed in the lower levels of hell - deal with it
       here */

    /* two modes : a) 1 fb to rule all crtcs.
                   b) one fb per crtc.
       two actions 1) new connected device
                   2) device removed.
       case a/1 : if the fb surface isn't big enough - resize the surface fb.
                  if the fb size isn't big enough - resize fb into surface.
                  if everything big enough configure the new crtc/etc.
       case a/2 : undo the configuration
                  possibly resize down the fb to fit the new configuration.
       case b/1 : see if it is on a new crtc - setup a new fb and add it.
       case b/2 : teardown the new fb.
    */
    ret = radeonfb_single_fb_probe(dev->dev_private);
    return ret;
}
EXPORT_SYMBOL(radeonfb_probe);

int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
{
    struct fb_info *info;
    struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
    struct radeon_object *robj;

    if (!fb) {
        return -EINVAL;
    }
    info = fb->fbdev;
    if (info) {
        robj = rfb->obj->driver_private;
        unregister_framebuffer(info);
        radeon_object_kunmap(robj);
        framebuffer_release(info);
    }

    printk(KERN_INFO "unregistered panic notifier\n");
    atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
    memset(&panic_mode, 0, sizeof(struct drm_mode_set));
    return 0;
}
EXPORT_SYMBOL(radeonfb_remove);
MODULE_LICENSE("GPL");
@ -0,0 +1,387 @@
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"

int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
    unsigned long irq_flags;

    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
    if (fence->emited) {
        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
        return 0;
    }
    fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
    if (!rdev->cp.ready) {
        /* FIXME: cp is not running, assume everything is done right
         * away
         */
        WREG32(rdev->fence_drv.scratch_reg, fence->seq);
    } else {
        radeon_fence_ring_emit(rdev, fence);
    }
    fence->emited = true;
    fence->timeout = jiffies + ((2000 * HZ) / 1000);
    list_del(&fence->list);
    list_add_tail(&fence->list, &rdev->fence_drv.emited);
    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
    return 0;
}

static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
    struct radeon_fence *fence;
    struct list_head *i, *n;
    uint32_t seq;
    bool wake = false;

    if (rdev == NULL) {
        return true;
    }
    if (rdev->shutdown) {
        return true;
    }
    seq = RREG32(rdev->fence_drv.scratch_reg);
    rdev->fence_drv.last_seq = seq;
    n = NULL;
    list_for_each(i, &rdev->fence_drv.emited) {
        fence = list_entry(i, struct radeon_fence, list);
        if (fence->seq == seq) {
            n = i;
            break;
        }
    }
    /* all fences previous to this one are considered signaled */
    if (n) {
        i = n;
        do {
            n = i->prev;
            list_del(i);
            list_add_tail(i, &rdev->fence_drv.signaled);
            fence = list_entry(i, struct radeon_fence, list);
            fence->signaled = true;
            i = n;
        } while (i != &rdev->fence_drv.emited);
        wake = true;
    }
    return wake;
}

static void radeon_fence_destroy(struct kref *kref)
{
    unsigned long irq_flags;
    struct radeon_fence *fence;

    fence = container_of(kref, struct radeon_fence, kref);
    write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
    list_del(&fence->list);
    fence->emited = false;
    write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
    kfree(fence);
}

int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
    unsigned long irq_flags;

    *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
    if ((*fence) == NULL) {
        return -ENOMEM;
    }
    kref_init(&((*fence)->kref));
    (*fence)->rdev = rdev;
    (*fence)->emited = false;
    (*fence)->signaled = false;
    (*fence)->seq = 0;
    INIT_LIST_HEAD(&(*fence)->list);

    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
    list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
    return 0;
}


bool radeon_fence_signaled(struct radeon_fence *fence)
{
    unsigned long irq_flags;
    bool signaled = false;

    if (fence == NULL) {
        return true;
    }
    if (fence->rdev->gpu_lockup) {
        return true;
    }
    write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
    signaled = fence->signaled;
    /* if we are shutting down, report all fences as signaled */
    if (fence->rdev->shutdown) {
        signaled = true;
    }
    if (!fence->emited) {
        WARN(1, "Querying an unemitted fence : %p !\n", fence);
        signaled = true;
    }
    if (!signaled) {
        radeon_fence_poll_locked(fence->rdev);
        signaled = fence->signaled;
    }
    write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
    return signaled;
}

int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
{
    struct radeon_device *rdev;
    unsigned long cur_jiffies;
    unsigned long timeout;
    bool expired = false;
    int r;


    if (fence == NULL) {
        WARN(1, "Querying an invalid fence : %p !\n", fence);
        return 0;
    }
    rdev = fence->rdev;
    if (radeon_fence_signaled(fence)) {
        return 0;
    }
retry:
    cur_jiffies = jiffies;
    timeout = HZ / 100;
    if (time_after(fence->timeout, cur_jiffies)) {
        timeout = fence->timeout - cur_jiffies;
    }
    if (interruptible) {
        r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
                radeon_fence_signaled(fence), timeout);
        if (unlikely(r == -ERESTARTSYS)) {
            return -ERESTART;
        }
    } else {
        r = wait_event_timeout(rdev->fence_drv.queue,
                radeon_fence_signaled(fence), timeout);
    }
    if (unlikely(!radeon_fence_signaled(fence))) {
        if (unlikely(r == 0)) {
            expired = true;
        }
        if (unlikely(expired)) {
            timeout = 1;
            if (time_after(cur_jiffies, fence->timeout)) {
                timeout = cur_jiffies - fence->timeout;
            }
            timeout = jiffies_to_msecs(timeout);
            if (timeout > 500) {
                DRM_ERROR("fence(%p:0x%08X) %lums timeout "
                          "going to reset GPU\n",
                          fence, fence->seq, timeout);
                radeon_gpu_reset(rdev);
                WREG32(rdev->fence_drv.scratch_reg, fence->seq);
            }
        }
        goto retry;
    }
    if (unlikely(expired)) {
        rdev->fence_drv.count_timeout++;
        cur_jiffies = jiffies;
        timeout = 1;
        if (time_after(cur_jiffies, fence->timeout)) {
            timeout = cur_jiffies - fence->timeout;
        }
        timeout = jiffies_to_msecs(timeout);
        DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
                  fence, fence->seq, timeout);
        DRM_ERROR("last signaled fence(0x%08X)\n",
                  rdev->fence_drv.last_seq);
    }
    return 0;
}

int radeon_fence_wait_next(struct radeon_device *rdev)
{
    unsigned long irq_flags;
    struct radeon_fence *fence;
    int r;

    if (rdev->gpu_lockup) {
        return 0;
    }
    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
    if (list_empty(&rdev->fence_drv.emited)) {
        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
        return 0;
    }
    fence = list_entry(rdev->fence_drv.emited.next,
                       struct radeon_fence, list);
    radeon_fence_ref(fence);
    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
    r = radeon_fence_wait(fence, false);
    radeon_fence_unref(&fence);
    return r;
}

int radeon_fence_wait_last(struct radeon_device *rdev)
{
    unsigned long irq_flags;
    struct radeon_fence *fence;
    int r;

    if (rdev->gpu_lockup) {
        return 0;
    }
    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
    if (list_empty(&rdev->fence_drv.emited)) {
        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
        return 0;
    }
    fence = list_entry(rdev->fence_drv.emited.prev,
                       struct radeon_fence, list);
    radeon_fence_ref(fence);
    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
    r = radeon_fence_wait(fence, false);
    radeon_fence_unref(&fence);
    return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
    kref_get(&fence->kref);
    return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
    struct radeon_fence *tmp = *fence;

    *fence = NULL;
    if (tmp) {
        kref_put(&tmp->kref, &radeon_fence_destroy);
    }
}

void radeon_fence_process(struct radeon_device *rdev)
{
    unsigned long irq_flags;
    bool wake;

    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
    wake = radeon_fence_poll_locked(rdev);
    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
    if (wake) {
        wake_up_all(&rdev->fence_drv.queue);
    }
}

int radeon_fence_driver_init(struct radeon_device *rdev)
{
    unsigned long irq_flags;
    int r;

    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
    r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
    if (r) {
        DRM_ERROR("Fence failed to get a scratch register.");
        write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
        return r;
    }
    WREG32(rdev->fence_drv.scratch_reg, 0);
    atomic_set(&rdev->fence_drv.seq, 0);
    INIT_LIST_HEAD(&rdev->fence_drv.created);
    INIT_LIST_HEAD(&rdev->fence_drv.emited);
    INIT_LIST_HEAD(&rdev->fence_drv.signaled);
    rdev->fence_drv.count_timeout = 0;
    init_waitqueue_head(&rdev->fence_drv.queue);
    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
    if (radeon_debugfs_fence_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for fence !\n");
    }
    return 0;
}

void radeon_fence_driver_fini(struct radeon_device *rdev)
{
    unsigned long irq_flags;

    wake_up_all(&rdev->fence_drv.queue);
    write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
    radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
    write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
    DRM_INFO("radeon: fence finalized\n");
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *)m->private;
    struct drm_device *dev = node->minor->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_fence *fence;

    seq_printf(m, "Last signaled fence 0x%08X\n",
               RREG32(rdev->fence_drv.scratch_reg));
    if (!list_empty(&rdev->fence_drv.emited)) {
        fence = list_entry(rdev->fence_drv.emited.prev,
                           struct radeon_fence, list);
        seq_printf(m, "Last emitted fence %p with 0x%08X\n",
                   fence, fence->seq);
    }
    return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
    {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
    return 0;
#endif
}
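
A minimal sketch of how a caller would drive this fence API, as I read the functions above (error handling trimmed; the flow itself is an assumption, not code from this patch):

/* Hypothetical caller: fence a batch of ring work, then block on it. */
struct radeon_fence *fence = NULL;
int r;

r = radeon_fence_create(rdev, &fence); /* adds fence to the created list */
if (r)
    return r;
/* ...emit the ring commands for the job here... */
radeon_fence_emit(rdev, fence);        /* assigns seq, moves to emited list */
r = radeon_fence_wait(fence, true);    /* interruptible wait on the seq */
radeon_fence_unref(&fence);            /* drop the kref; may free the fence */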
@ -0,0 +1,50 @@
/*
 * Copyright 2009 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#ifndef RADEON_FIXED_H
#define RADEON_FIXED_H

typedef union rfixed {
    u32 full;
} fixed20_12;


#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
#define fixed_init(A) { .full = rfixed_const((A)) }
#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
#define rfixed_trunc(A) ((A).full >> 12)

static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
{
    u64 tmp = ((u64)A.full << 13);

    do_div(tmp, B.full);
    tmp += 1;
    tmp /= 2;
    return lower_32_bits(tmp);
}
#endif
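
A rough usage sketch of the macros above (my reading of the header, not code from the driver): fixed20_12 stores a 20.12 fixed-point value in a u32, so 1.5 * 2.0 works out as follows.

/* Illustrative only: 1.5 * 2.0 in 20.12 fixed point. */
fixed20_12 a = { .full = rfixed_const_half(1) }; /* 1.5 -> 0x1800 */
fixed20_12 b = { .full = rfixed_const(2) };      /* 2.0 -> 0x2000 */
fixed20_12 c;

c.full = rfixed_mul(a, b); /* 3.0 -> 0x3000, rounded to nearest */
/* rfixed_trunc(c) == 3 */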
@ -0,0 +1,233 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_reg.h"

/*
 * Common GART table functions.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
    void *ptr;

    ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
                               &rdev->gart.table_addr);
    if (ptr == NULL) {
        return -ENOMEM;
    }
#ifdef CONFIG_X86
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
        rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
        set_memory_uc((unsigned long)ptr,
                      rdev->gart.table_size >> PAGE_SHIFT);
    }
#endif
    rdev->gart.table.ram.ptr = ptr;
    memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
    return 0;
}

void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
    if (rdev->gart.table.ram.ptr == NULL) {
        return;
    }
#ifdef CONFIG_X86
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
        rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
        set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
                      rdev->gart.table_size >> PAGE_SHIFT);
    }
#endif
    pci_free_consistent(rdev->pdev, rdev->gart.table_size,
                        (void *)rdev->gart.table.ram.ptr,
                        rdev->gart.table_addr);
    rdev->gart.table.ram.ptr = NULL;
    rdev->gart.table_addr = 0;
}

int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
    uint64_t gpu_addr;
    int r;

    if (rdev->gart.table.vram.robj == NULL) {
        r = radeon_object_create(rdev, NULL,
                                 rdev->gart.table_size,
                                 true,
                                 RADEON_GEM_DOMAIN_VRAM,
                                 false, &rdev->gart.table.vram.robj);
        if (r) {
            return r;
        }
    }
    r = radeon_object_pin(rdev->gart.table.vram.robj,
                          RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (r) {
        radeon_object_unref(&rdev->gart.table.vram.robj);
        return r;
    }
    r = radeon_object_kmap(rdev->gart.table.vram.robj,
                           (void **)&rdev->gart.table.vram.ptr);
    if (r) {
        radeon_object_unpin(rdev->gart.table.vram.robj);
        radeon_object_unref(&rdev->gart.table.vram.robj);
        DRM_ERROR("radeon: failed to map gart vram table.\n");
        return r;
    }
    rdev->gart.table_addr = gpu_addr;
    return 0;
}

void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
    if (rdev->gart.table.vram.robj == NULL) {
        return;
    }
    radeon_object_kunmap(rdev->gart.table.vram.robj);
    radeon_object_unpin(rdev->gart.table.vram.robj);
    radeon_object_unref(&rdev->gart.table.vram.robj);
}




/*
 * Common gart functions.
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        int pages)
{
    unsigned t;
    unsigned p;
    int i, j;

    if (!rdev->gart.ready) {
        WARN(1, "trying to unbind memory from uninitialized GART !\n");
        return;
    }
    t = offset / 4096;
    p = t / (PAGE_SIZE / 4096);
    for (i = 0; i < pages; i++, p++) {
        if (rdev->gart.pages[p]) {
            pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
            rdev->gart.pages[p] = NULL;
            rdev->gart.pages_addr[p] = 0;
            for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
                radeon_gart_set_page(rdev, t, 0);
            }
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
}

int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                     int pages, struct page **pagelist)
{
    unsigned t;
    unsigned p;
    uint64_t page_base;
    int i, j;

    if (!rdev->gart.ready) {
        DRM_ERROR("trying to bind memory to uninitialized GART !\n");
        return -EINVAL;
    }
    t = offset / 4096;
    p = t / (PAGE_SIZE / 4096);

    for (i = 0; i < pages; i++, p++) {
        /* we need to support large memory configurations */
        /* assume that unbind has already been called on the range */
        rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
                                                0, PAGE_SIZE,
                                                PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
            /* FIXME: failed to map page (return -ENOMEM?) */
            radeon_gart_unbind(rdev, offset, pages);
            return -ENOMEM;
        }
        rdev->gart.pages[p] = pagelist[i];
        page_base = (uint32_t)rdev->gart.pages_addr[p];
        for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
            radeon_gart_set_page(rdev, t, page_base);
            page_base += 4096;
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
    return 0;
}
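
The index arithmetic in radeon_gart_bind()/radeon_gart_unbind() above assumes a fixed 4096-byte GPU page, so one CPU page of PAGE_SIZE covers PAGE_SIZE/4096 GPU page-table entries. A small sketch of the mapping (illustrative only, not driver code):

/* For a GART byte offset, derive the first GPU page-table slot and the
 * CPU page index it belongs to, assuming 4096-byte GPU pages. */
unsigned gpu_slot = offset / 4096;                  /* "t" in the code */
unsigned cpu_page = gpu_slot / (PAGE_SIZE / 4096);  /* "p" in the code */
/* With 4K CPU pages the two indices coincide; with 64K CPU pages each
 * cpu_page spans 16 consecutive GPU page-table entries. */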

int radeon_gart_init(struct radeon_device *rdev)
{
    if (rdev->gart.pages) {
        return 0;
    }
    /* We need PAGE_SIZE >= 4096 */
    if (PAGE_SIZE < 4096) {
        DRM_ERROR("Page size is smaller than GPU page size!\n");
        return -EINVAL;
    }
    /* Compute table size */
    rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
    rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
    DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
             rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
    /* Allocate pages table */
    rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
                               GFP_KERNEL);
    if (rdev->gart.pages == NULL) {
        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
                                    rdev->gart.num_cpu_pages, GFP_KERNEL);
    if (rdev->gart.pages_addr == NULL) {
        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    return 0;
}

void radeon_gart_fini(struct radeon_device *rdev)
{
    if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
        /* unbind pages */
        radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
    }
    rdev->gart.ready = false;
    kfree(rdev->gart.pages);
    kfree(rdev->gart.pages_addr);
    rdev->gart.pages = NULL;
    rdev->gart.pages_addr = NULL;
}
@ -0,0 +1,287 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
    /* we do nothing here */
    return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
    struct radeon_object *robj = gobj->driver_private;

    gobj->driver_private = NULL;
    if (robj) {
        radeon_object_unref(&robj);
    }
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
                             int alignment, int initial_domain,
                             bool discardable, bool kernel,
                             bool interruptible,
                             struct drm_gem_object **obj)
{
    struct drm_gem_object *gobj;
    struct radeon_object *robj;
    int r;

    *obj = NULL;
    gobj = drm_gem_object_alloc(rdev->ddev, size);
    if (!gobj) {
        return -ENOMEM;
    }
    /* At least align on page size */
    if (alignment < PAGE_SIZE) {
        alignment = PAGE_SIZE;
    }
    r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
                             interruptible, &robj);
    if (r) {
        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
                  size, initial_domain, alignment);
        mutex_lock(&rdev->ddev->struct_mutex);
        drm_gem_object_unreference(gobj);
        mutex_unlock(&rdev->ddev->struct_mutex);
        return r;
    }
    gobj->driver_private = robj;
    *obj = gobj;
    return 0;
}

int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
                          uint64_t *gpu_addr)
{
    struct radeon_object *robj = obj->driver_private;
    uint32_t flags;

    switch (pin_domain) {
    case RADEON_GEM_DOMAIN_VRAM:
        flags = TTM_PL_FLAG_VRAM;
        break;
    case RADEON_GEM_DOMAIN_GTT:
        flags = TTM_PL_FLAG_TT;
        break;
    default:
        flags = TTM_PL_FLAG_SYSTEM;
        break;
    }
    return radeon_object_pin(robj, flags, gpu_addr);
}

void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
    struct radeon_object *robj = obj->driver_private;
    radeon_object_unpin(robj);
}

int radeon_gem_set_domain(struct drm_gem_object *gobj,
                          uint32_t rdomain, uint32_t wdomain)
{
    struct radeon_object *robj;
    uint32_t domain;
    int r;

    /* FIXME: reimplement */
    robj = gobj->driver_private;
    /* work out where to validate the buffer to */
    domain = wdomain;
    if (!domain) {
        domain = rdomain;
    }
    if (!domain) {
        /* Do nothing */
        printk(KERN_WARNING "Set domain without domain !\n");
        return 0;
    }
    if (domain == RADEON_GEM_DOMAIN_CPU) {
        /* Asking for cpu access wait for object idle */
        r = radeon_object_wait(robj);
        if (r) {
            printk(KERN_ERR "Failed to wait for object !\n");
            return r;
        }
    }
    return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
    INIT_LIST_HEAD(&rdev->gem.objects);
    return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
    radeon_object_force_delete(rdev);
}


/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_radeon_gem_info *args = data;

    args->vram_size = rdev->mc.vram_size;
    /* FIXME: report something that makes sense */
    args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024);
    args->gart_size = rdev->mc.gtt_size;
    return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
    /* TODO: implement */
    DRM_ERROR("unimplemented %s\n", __func__);
    return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
    /* TODO: implement */
    DRM_ERROR("unimplemented %s\n", __func__);
    return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_radeon_gem_create *args = data;
    struct drm_gem_object *gobj;
    uint32_t handle;
    int r;

    /* create a gem object to contain this object in */
    args->size = roundup(args->size, PAGE_SIZE);
    r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                 args->initial_domain, false,
                                 false, true, &gobj);
    if (r) {
        return r;
    }
    r = drm_gem_handle_create(filp, gobj, &handle);
    if (r) {
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(gobj);
        mutex_unlock(&dev->struct_mutex);
        return r;
    }
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_handle_unreference(gobj);
    mutex_unlock(&dev->struct_mutex);
    args->handle = handle;
    return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
    /* transition the BO to a domain -
     * just validate the BO into a certain domain */
    struct drm_radeon_gem_set_domain *args = data;
    struct drm_gem_object *gobj;
    struct radeon_object *robj;
    int r;

    /* for now if someone requests domain CPU -
     * just make sure the buffer is finished with */

    /* just do a BO wait for now */
    gobj = drm_gem_object_lookup(dev, filp, args->handle);
    if (gobj == NULL) {
        return -EINVAL;
    }
    robj = gobj->driver_private;

    r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

    mutex_lock(&dev->struct_mutex);
    drm_gem_object_unreference(gobj);
    mutex_unlock(&dev->struct_mutex);
    return r;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
    struct drm_radeon_gem_mmap *args = data;
    struct drm_gem_object *gobj;
    struct radeon_object *robj;
    int r;

    gobj = drm_gem_object_lookup(dev, filp, args->handle);
    if (gobj == NULL) {
        return -EINVAL;
    }
    robj = gobj->driver_private;
    r = radeon_object_mmap(robj, &args->addr_ptr);
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_unreference(gobj);
    mutex_unlock(&dev->struct_mutex);
    return r;
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
    /* FIXME: implement */
    return 0;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
    struct drm_radeon_gem_wait_idle *args = data;
    struct drm_gem_object *gobj;
    struct radeon_object *robj;
    int r;

    gobj = drm_gem_object_lookup(dev, filp, args->handle);
    if (gobj == NULL) {
        return -EINVAL;
    }
    robj = gobj->driver_private;
    r = radeon_object_wait(robj);
    mutex_lock(&dev->struct_mutex);
    drm_gem_object_unreference(gobj);
    mutex_unlock(&dev->struct_mutex);
    return r;
}
@ -0,0 +1,209 @@
|
|||
/*
|
||||
* Copyright 2007-8 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
*/
|
||||
#include "drmP.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon.h"
|
||||
|
||||
/**
|
||||
* radeon_ddc_probe
|
||||
*
|
||||
*/
|
||||
bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
|
||||
{
|
||||
u8 out_buf[] = { 0x0, 0x0};
|
||||
u8 buf[2];
|
||||
int ret;
|
||||
struct i2c_msg msgs[] = {
|
||||
{
|
||||
.addr = 0x50,
|
||||
.flags = 0,
|
||||
.len = 1,
|
||||
.buf = out_buf,
|
||||
},
|
||||
{
|
||||
.addr = 0x50,
|
||||
.flags = I2C_M_RD,
|
||||
.len = 1,
|
||||
.buf = buf,
|
||||
}
|
||||
};
|
||||
|
||||
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
|
||||
if (ret == 2)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}

void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
{
    struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
    uint32_t temp;
    struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;

    /* RV410 appears to have a bug where the hw i2c engine, while in
     * reset, holds the i2c port in a bad state - switch hw i2c away
     * before doing DDC - do this for all r200s/r300s/r400s for safety's
     * sake.
     */
    if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
        if (rec->a_clk_reg == RADEON_GPIO_MONID) {
            WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
                                           R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
        } else {
            WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
                                           R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
        }
    }
    if (lock_state) {
        temp = RREG32(rec->a_clk_reg);
        temp &= ~(rec->a_clk_mask);
        WREG32(rec->a_clk_reg, temp);

        temp = RREG32(rec->a_data_reg);
        temp &= ~(rec->a_data_mask);
        WREG32(rec->a_data_reg, temp);
    }

    temp = RREG32(rec->mask_clk_reg);
    if (lock_state)
        temp |= rec->mask_clk_mask;
    else
        temp &= ~rec->mask_clk_mask;
    WREG32(rec->mask_clk_reg, temp);
    temp = RREG32(rec->mask_clk_reg);

    temp = RREG32(rec->mask_data_reg);
    if (lock_state)
        temp |= rec->mask_data_mask;
    else
        temp &= ~rec->mask_data_mask;
    WREG32(rec->mask_data_reg, temp);
    temp = RREG32(rec->mask_data_reg);
}

static int get_clock(void *i2c_priv)
{
    struct radeon_i2c_chan *i2c = i2c_priv;
    struct radeon_device *rdev = i2c->dev->dev_private;
    struct radeon_i2c_bus_rec *rec = &i2c->rec;
    uint32_t val;

    val = RREG32(rec->get_clk_reg);
    val &= rec->get_clk_mask;

    return (val != 0);
}

static int get_data(void *i2c_priv)
{
    struct radeon_i2c_chan *i2c = i2c_priv;
    struct radeon_device *rdev = i2c->dev->dev_private;
    struct radeon_i2c_bus_rec *rec = &i2c->rec;
    uint32_t val;

    val = RREG32(rec->get_data_reg);
    val &= rec->get_data_mask;
    return (val != 0);
}

static void set_clock(void *i2c_priv, int clock)
{
    struct radeon_i2c_chan *i2c = i2c_priv;
    struct radeon_device *rdev = i2c->dev->dev_private;
    struct radeon_i2c_bus_rec *rec = &i2c->rec;
    uint32_t val;

    val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
    val |= clock ? 0 : rec->put_clk_mask;
    WREG32(rec->put_clk_reg, val);
}

static void set_data(void *i2c_priv, int data)
{
    struct radeon_i2c_chan *i2c = i2c_priv;
    struct radeon_device *rdev = i2c->dev->dev_private;
    struct radeon_i2c_bus_rec *rec = &i2c->rec;
    uint32_t val;

    val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
    val |= data ? 0 : rec->put_data_mask;
    WREG32(rec->put_data_reg, val);
}
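/* Editorial note: these four callbacks bit-bang i2c in the usual open-drain
 * fashion (an assumption based on how i2c-algo-bit drives GPIO lines):
 * setting the put mask bit drives the line low, clearing it releases the
 * line so the external pull-up can raise it. Hence the inverted logic
 * above, e.g. for set_clock():
 *
 *    val |= clock ? 0 : rec->put_clk_mask;    // 1 = release, 0 = drive low
 */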

struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
                                          struct radeon_i2c_bus_rec *rec,
                                          const char *name)
{
    struct radeon_i2c_chan *i2c;
    int ret;

    i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
    if (i2c == NULL)
        return NULL;

    i2c->adapter.owner = THIS_MODULE;
    i2c->adapter.algo_data = &i2c->algo;
    i2c->dev = dev;
    i2c->algo.setsda = set_data;
    i2c->algo.setscl = set_clock;
    i2c->algo.getsda = get_data;
    i2c->algo.getscl = get_clock;
    i2c->algo.udelay = 20;
    /* VESA says 2.2 ms is enough; 1 jiffy doesn't always cover that,
     * 2 jiffies is a lot more reliable */
    i2c->algo.timeout = 2;
    i2c->algo.data = i2c;
    i2c->rec = *rec;
    i2c_set_adapdata(&i2c->adapter, i2c);

    ret = i2c_bit_add_bus(&i2c->adapter);
    if (ret) {
        DRM_INFO("Failed to register i2c %s\n", name);
        goto out_free;
    }

    return i2c;
out_free:
    drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
    return NULL;
}

void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
    if (!i2c)
        return;

    i2c_del_adapter(&i2c->adapter);
    drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
}

struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
    return NULL;
}
@@ -0,0 +1,158 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon_microcode.h"
#include "radeon.h"
#include "atom.h"

static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
    uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
    uint32_t irq_mask = RADEON_SW_INT_TEST;

    if (irqs) {
        WREG32(RADEON_GEN_INT_STATUS, irqs);
    }
    return irqs & irq_mask;
}

int r100_irq_set(struct radeon_device *rdev)
{
    uint32_t tmp = 0;

    if (rdev->irq.sw_int) {
        tmp |= RADEON_SW_INT_ENABLE;
    }
    /* TODO: walk the CRTCs and enable the vblank interrupts as needed */
    WREG32(RADEON_GEN_INT_CNTL, tmp);
    return 0;
}

int r100_irq_process(struct radeon_device *rdev)
{
    uint32_t status;

    status = r100_irq_ack(rdev);
    if (!status) {
        return IRQ_NONE;
    }
    while (status) {
        /* SW interrupt */
        if (status & RADEON_SW_INT_TEST) {
            radeon_fence_process(rdev);
        }
        status = r100_irq_ack(rdev);
    }
    return IRQ_HANDLED;
}

int rs600_irq_set(struct radeon_device *rdev)
{
    uint32_t tmp = 0;

    if (rdev->irq.sw_int) {
        tmp |= RADEON_SW_INT_ENABLE;
    }
    WREG32(RADEON_GEN_INT_CNTL, tmp);
    /* TODO: walk the CRTCs and enable the vblank interrupts as needed */
    WREG32(R500_DxMODE_INT_MASK, 0);
    return 0;
}

irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
    struct drm_device *dev = (struct drm_device *) arg;
    struct radeon_device *rdev = dev->dev_private;

    return radeon_irq_process(rdev);
}

void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
    struct radeon_device *rdev = dev->dev_private;
    unsigned i;

    /* Disable *all* interrupts */
    rdev->irq.sw_int = false;
    for (i = 0; i < 2; i++) {
        rdev->irq.crtc_vblank_int[i] = false;
    }
    radeon_irq_set(rdev);
    /* Clear bits */
    radeon_irq_process(rdev);
}

int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
    struct radeon_device *rdev = dev->dev_private;

    dev->max_vblank_count = 0x001fffff;
    rdev->irq.sw_int = true;
    radeon_irq_set(rdev);
    return 0;
}

void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
    struct radeon_device *rdev = dev->dev_private;
    unsigned i;

    if (rdev == NULL) {
        return;
    }
    /* Disable *all* interrupts */
    rdev->irq.sw_int = false;
    for (i = 0; i < 2; i++) {
        rdev->irq.crtc_vblank_int[i] = false;
    }
    radeon_irq_set(rdev);
}

int radeon_irq_kms_init(struct radeon_device *rdev)
{
    int r = 0;

    r = drm_vblank_init(rdev->ddev, 2);
    if (r) {
        return r;
    }
    drm_irq_install(rdev->ddev);
    rdev->irq.installed = true;
    DRM_INFO("radeon: irq initialized.\n");
    return 0;
}

void radeon_irq_kms_fini(struct radeon_device *rdev)
{
    if (rdev->irq.installed) {
        rdev->irq.installed = false;
        drm_irq_uninstall(rdev->ddev);
    }
}
@@ -0,0 +1,295 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm_sarea.h"
#include "radeon.h"
#include "radeon_drm.h"


/*
 * Driver load/unload
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct radeon_device *rdev;
    int r;

    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
    if (rdev == NULL) {
        return -ENOMEM;
    }
    dev->dev_private = (void *)rdev;

    /* update BUS flag */
    if (drm_device_is_agp(dev)) {
        flags |= RADEON_IS_AGP;
    } else if (drm_device_is_pcie(dev)) {
        flags |= RADEON_IS_PCIE;
    } else {
        flags |= RADEON_IS_PCI;
    }

    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r) {
        DRM_ERROR("Failed to initialize radeon, disabling IOCTLs\n");
        radeon_device_fini(rdev);
        return r;
    }
    return 0;
}

int radeon_driver_unload_kms(struct drm_device *dev)
{
    struct radeon_device *rdev = dev->dev_private;

    radeon_device_fini(rdev);
    kfree(rdev);
    dev->dev_private = NULL;
    return 0;
}


/*
 * Userspace information query ioctl
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_radeon_info *info;
    uint32_t *value_ptr;
    uint32_t value;

    info = data;
    value_ptr = (uint32_t *)((unsigned long)info->value);
    switch (info->request) {
    case RADEON_INFO_DEVICE_ID:
        value = dev->pci_device;
        break;
    case RADEON_INFO_NUM_GB_PIPES:
        value = rdev->num_gb_pipes;
        break;
    default:
        DRM_DEBUG("Invalid request %d\n", info->request);
        return -EINVAL;
    }
    if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
        DRM_ERROR("copy_to_user\n");
        return -EFAULT;
    }
    return 0;
}
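/* Editorial note: a hypothetical userspace sketch of this request path. The
 * field layout matches the handler above; the DRM_IOCTL_RADEON_INFO wrapper
 * name is an assumption, not something defined in this patch:
 *
 *    struct drm_radeon_info info = { 0 };
 *    uint32_t num_pipes;
 *
 *    info.request = RADEON_INFO_NUM_GB_PIPES;
 *    info.value = (uintptr_t)&num_pipes;
 *    ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
 */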


/*
 * Legacy paths for the old DRM world where Xorg was in charge (no-ops now).
 */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
    return 0;
}

void radeon_driver_lastclose_kms(struct drm_device *dev)
{
}

int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
    return 0;
}

void radeon_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv)
{
}

void radeon_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv)
{
}


/*
 * VBlank related functions.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
    /* FIXME: implement */
    return 0;
}

int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
    /* FIXME: implement */
    return 0;
}

void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
    /* FIXME: implement */
}


/*
 * For multiple masters (like multiple X servers).
 */
struct drm_radeon_master_private {
    drm_local_map_t *sarea;
    drm_radeon_sarea_t *sarea_priv;
};

int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
{
    struct drm_radeon_master_private *master_priv;
    unsigned long sareapage;
    int ret;

    master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
    if (master_priv == NULL) {
        return -ENOMEM;
    }
    /* prebuild the SAREA */
    sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
    ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
                     _DRM_CONTAINS_LOCK|_DRM_DRIVER,
                     &master_priv->sarea);
    if (ret) {
        DRM_ERROR("SAREA setup failed\n");
        return ret;
    }
    master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
    master_priv->sarea_priv->pfCurrentPage = 0;
    master->driver_priv = master_priv;
    return 0;
}

void radeon_master_destroy_kms(struct drm_device *dev,
                               struct drm_master *master)
{
    struct drm_radeon_master_private *master_priv = master->driver_priv;

    if (master_priv == NULL) {
        return;
    }
    if (master_priv->sarea) {
        drm_rmmap_locked(dev, master_priv->sarea);
    }
    drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
    master->driver_priv = NULL;
}


/*
 * IOCTL.
 */
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
    /* Not valid in KMS. */
    return -EINVAL;
}

#define KMS_INVALID_IOCTL(name)                                         \
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{                                                                       \
    DRM_ERROR("invalid ioctl with kms %s\n", __func__);                 \
    return -EINVAL;                                                     \
}
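/* For illustration, KMS_INVALID_IOCTL(radeon_cp_init_kms) expands to a stub
 * that logs and fails:
 *
 *    int radeon_cp_init_kms(struct drm_device *dev, void *data,
 *                           struct drm_file *file_priv)
 *    {
 *        DRM_ERROR("invalid ioctl with kms %s\n", __func__);
 *        return -EINVAL;
 *    }
 */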

/*
 * All of these ioctls are invalid in the KMS world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)


struct drm_ioctl_desc radeon_ioctls_kms[] = {
    DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
    /* KMS */
    DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,398 @@
/*
 * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
 *                VA Linux Systems Inc., Fremont, California.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Original Authors:
 *      Kevin E. Martin, Rickard E. Faith, Alan Hourihane
 *
 * Kernel port Author: Dave Airlie
 */

#ifndef RADEON_MODE_H
#define RADEON_MODE_H

#include <drm_crtc.h>
#include <drm_mode.h>
#include <drm_edid.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>

#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
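/* For illustration, these wrappers recover the radeon object that embeds the
 * core DRM object, e.g.:
 *
 *    struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 */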

enum radeon_connector_type {
    CONNECTOR_NONE,
    CONNECTOR_VGA,
    CONNECTOR_DVI_I,
    CONNECTOR_DVI_D,
    CONNECTOR_DVI_A,
    CONNECTOR_STV,
    CONNECTOR_CTV,
    CONNECTOR_LVDS,
    CONNECTOR_DIGITAL,
    CONNECTOR_SCART,
    CONNECTOR_HDMI_TYPE_A,
    CONNECTOR_HDMI_TYPE_B,
    CONNECTOR_0XC,
    CONNECTOR_0XD,
    CONNECTOR_DIN,
    CONNECTOR_DISPLAY_PORT,
    CONNECTOR_UNSUPPORTED
};

enum radeon_dvi_type {
    DVI_AUTO,
    DVI_DIGITAL,
    DVI_ANALOG
};

enum radeon_rmx_type {
    RMX_OFF,
    RMX_FULL,
    RMX_CENTER,
    RMX_ASPECT
};

enum radeon_tv_std {
    TV_STD_NTSC,
    TV_STD_PAL,
    TV_STD_PAL_M,
    TV_STD_PAL_60,
    TV_STD_NTSC_J,
    TV_STD_SCART_PAL,
    TV_STD_SECAM,
    TV_STD_PAL_CN,
};

struct radeon_i2c_bus_rec {
    bool valid;
    uint32_t mask_clk_reg;
    uint32_t mask_data_reg;
    uint32_t a_clk_reg;
    uint32_t a_data_reg;
    uint32_t put_clk_reg;
    uint32_t put_data_reg;
    uint32_t get_clk_reg;
    uint32_t get_data_reg;
    uint32_t mask_clk_mask;
    uint32_t mask_data_mask;
    uint32_t put_clk_mask;
    uint32_t put_data_mask;
    uint32_t get_clk_mask;
    uint32_t get_data_mask;
    uint32_t a_clk_mask;
    uint32_t a_data_mask;
};

struct radeon_tmds_pll {
    uint32_t freq;
    uint32_t value;
};

#define RADEON_MAX_BIOS_CONNECTOR 16

#define RADEON_PLL_USE_BIOS_DIVS        (1 << 0)
#define RADEON_PLL_NO_ODD_POST_DIV      (1 << 1)
#define RADEON_PLL_USE_REF_DIV          (1 << 2)
#define RADEON_PLL_LEGACY               (1 << 3)
#define RADEON_PLL_PREFER_LOW_REF_DIV   (1 << 4)
#define RADEON_PLL_PREFER_HIGH_REF_DIV  (1 << 5)
#define RADEON_PLL_PREFER_LOW_FB_DIV    (1 << 6)
#define RADEON_PLL_PREFER_HIGH_FB_DIV   (1 << 7)
#define RADEON_PLL_PREFER_LOW_POST_DIV  (1 << 8)
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
#define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)

struct radeon_pll {
    uint16_t reference_freq;
    uint16_t reference_div;
    uint32_t pll_in_min;
    uint32_t pll_in_max;
    uint32_t pll_out_min;
    uint32_t pll_out_max;
    uint16_t xclk;

    uint32_t min_ref_div;
    uint32_t max_ref_div;
    uint32_t min_post_div;
    uint32_t max_post_div;
    uint32_t min_feedback_div;
    uint32_t max_feedback_div;
    uint32_t min_frac_feedback_div;
    uint32_t max_frac_feedback_div;
    uint32_t best_vco;
};

struct radeon_i2c_chan {
    struct drm_device *dev;
    struct i2c_adapter adapter;
    struct i2c_algo_bit_data algo;
    struct radeon_i2c_bus_rec rec;
};

/* mostly for macs, but really any system without connector tables */
enum radeon_connector_table {
    CT_NONE,
    CT_GENERIC,
    CT_IBOOK,
    CT_POWERBOOK_EXTERNAL,
    CT_POWERBOOK_INTERNAL,
    CT_POWERBOOK_VGA,
    CT_MINI_EXTERNAL,
    CT_MINI_INTERNAL,
    CT_IMAC_G5_ISIGHT,
    CT_EMAC,
};

struct radeon_mode_info {
    struct atom_context *atom_context;
    enum radeon_connector_table connector_table;
    bool mode_config_initialized;
};

struct radeon_crtc {
    struct drm_crtc base;
    int crtc_id;
    u16 lut_r[256], lut_g[256], lut_b[256];
    bool enabled;
    bool can_tile;
    uint32_t crtc_offset;
    struct radeon_framebuffer *fbdev_fb;
    struct drm_mode_set mode_set;
    struct drm_gem_object *cursor_bo;
    uint64_t cursor_addr;
    int cursor_width;
    int cursor_height;
};

#define RADEON_USE_RMX 1

struct radeon_native_mode {
    /* preferred mode */
    uint32_t panel_xres, panel_yres;
    uint32_t hoverplus, hsync_width;
    uint32_t hblank;
    uint32_t voverplus, vsync_width;
    uint32_t vblank;
    uint32_t dotclock;
    uint32_t flags;
};

struct radeon_encoder_primary_dac {
    /* legacy primary dac */
    uint32_t ps2_pdac_adj;
};

struct radeon_encoder_lvds {
    /* legacy lvds */
    uint16_t panel_vcc_delay;
    uint8_t  panel_pwr_delay;
    uint8_t  panel_digon_delay;
    uint8_t  panel_blon_delay;
    uint16_t panel_ref_divider;
    uint8_t  panel_post_divider;
    uint16_t panel_fb_divider;
    bool     use_bios_dividers;
    uint32_t lvds_gen_cntl;
    /* panel mode */
    struct radeon_native_mode native_mode;
};

struct radeon_encoder_tv_dac {
    /* legacy tv dac */
    uint32_t ps2_tvdac_adj;
    uint32_t ntsc_tvdac_adj;
    uint32_t pal_tvdac_adj;

    enum radeon_tv_std tv_std;
};

struct radeon_encoder_int_tmds {
    /* legacy int tmds */
    struct radeon_tmds_pll tmds_pll[4];
};

struct radeon_encoder_atom_dig {
    /* atom dig */
    bool coherent_mode;
    int dig_block;
    /* atom lvds */
    uint32_t lvds_misc;
    uint16_t panel_pwr_delay;
    /* panel mode */
    struct radeon_native_mode native_mode;
};

struct radeon_encoder {
    struct drm_encoder base;
    uint32_t encoder_id;
    uint32_t devices;
    uint32_t flags;
    uint32_t pixel_clock;
    enum radeon_rmx_type rmx_type;
    struct radeon_native_mode native_mode;
    void *enc_priv;
};

struct radeon_connector_atom_dig {
    uint32_t igp_lane_info;
    bool linkb;
};

struct radeon_connector {
    struct drm_connector base;
    uint32_t connector_id;
    uint32_t devices;
    struct radeon_i2c_chan *ddc_bus;
    int use_digital;
    void *con_priv;
};

struct radeon_framebuffer {
    struct drm_framebuffer base;
    struct drm_gem_object *obj;
};

extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
                                                 struct radeon_i2c_bus_rec *rec,
                                                 const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);

extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);

extern void radeon_compute_pll(struct radeon_pll *pll,
                               uint64_t freq,
                               uint32_t *dot_clock_p,
                               uint32_t *fb_div_p,
                               uint32_t *frac_fb_div_p,
                               uint32_t *ref_div_p,
                               uint32_t *post_div_p,
                               int flags);

struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);

extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                                  struct drm_framebuffer *old_fb);
extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
                                  struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode,
                                  int x, int y,
                                  struct drm_framebuffer *old_fb);
extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);

extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                                struct drm_framebuffer *old_fb);
extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);

extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
                                  struct drm_file *file_priv,
                                  uint32_t handle,
                                  uint32_t width,
                                  uint32_t height);
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                                   int x, int y);

extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_int_tmds *
radeon_atombios_get_tmds_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac *
radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac *
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_lvds *
radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_int_tmds *
radeon_combios_get_tmds_info(struct radeon_encoder *encoder);
extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac *
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac *
radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
extern void
radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
extern void
radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
extern void
radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                     u16 blue, int regno);
struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
                                                  struct drm_mode_fb_cmd *mode_cmd,
                                                  struct drm_gem_object *obj);

int radeonfb_probe(struct drm_device *dev);

int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
void radeon_atombios_init_crtc(struct drm_device *dev,
                               struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
                             struct radeon_crtc *radeon_crtc);
void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);

void radeon_get_clock_info(struct drm_device *dev);

extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);

void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
                           struct drm_display_mode *mode,
                           struct drm_display_mode *adjusted_mode);
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
extern int radeon_static_clocks_init(struct drm_device *dev);
void radeon_init_disp_bw_legacy(struct drm_device *dev,
                                struct drm_display_mode *mode1,
                                uint32_t pixel_bytes1,
                                struct drm_display_mode *mode2,
                                uint32_t pixel_bytes2);
void radeon_init_disp_bw_avivo(struct drm_device *dev,
                               struct drm_display_mode *mode1,
                               uint32_t pixel_bytes1,
                               struct drm_display_mode *mode2,
                               uint32_t pixel_bytes2);
void radeon_init_disp_bandwidth(struct drm_device *dev);

#endif
@@ -0,0 +1,511 @@
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

struct radeon_object {
    struct ttm_buffer_object tobj;
    struct list_head list;
    struct radeon_device *rdev;
    struct drm_gem_object *gobj;
    struct ttm_bo_kmap_obj kmap;
    unsigned pin_count;
    uint64_t gpu_addr;
    void *kptr;
    bool is_iomem;
};

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
    return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
    ttm_bo_unreserve(&robj->tobj);
}
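/* Editorial note: a minimal sketch of the locking rule stated above; every
 * accessor in this file brackets its work with a reserve/unreserve pair, as
 * radeon_object_wait() and radeon_object_pin() below do:
 *
 *    r = radeon_object_reserve(robj, true);
 *    if (unlikely(r != 0))
 *        return r;
 *    ...touch robj->tobj state...
 *    radeon_object_unreserve(robj);
 */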

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
    struct radeon_object *robj;

    robj = container_of(tobj, struct radeon_object, tobj);
    list_del_init(&robj->list);
    kfree(robj);
}

static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
    /* Default gpu address */
    robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
    if (robj->tobj.mem.mm_node == NULL) {
        return;
    }
    robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
    switch (robj->tobj.mem.mem_type) {
    case TTM_PL_VRAM:
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
        break;
    case TTM_PL_TT:
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        return;
    }
}
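/* For illustration: with 4 KiB pages, an object whose mm_node starts at
 * page 256 of VRAM ends up at
 *
 *    gpu_addr = (256 << 12) + rdev->mc.vram_location
 *             = 0x100000 + vram_location
 */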

static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
    uint32_t flags = 0;
    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        flags |= TTM_PL_FLAG_VRAM;
    }
    if (domain & RADEON_GEM_DOMAIN_GTT) {
        flags |= TTM_PL_FLAG_TT;
    }
    if (domain & RADEON_GEM_DOMAIN_CPU) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    if (!flags) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    return flags;
}
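/* For illustration: a buffer allowed in either VRAM or GTT, i.e.
 * domain == (RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT), maps to
 * TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT; a domain of 0 falls back to
 * TTM_PL_FLAG_SYSTEM so TTM always has a valid placement to work with.
 */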

int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r;

    if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
        rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
    }
    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
    robj->gobj = gobj;
    INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);
    r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
                               0, 0, false, NULL, size,
                               &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* ttm calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
                  size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
    if (gobj) {
        list_add_tail(&robj->list, &rdev->gem.objects);
    }
    return 0;
}

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r;

    spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
        spin_unlock(&robj->tobj.lock);
        return 0;
    }
    spin_unlock(&robj->tobj.lock);
    r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
    if (r) {
        return r;
    }
    spin_lock(&robj->tobj.lock);
    robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
    spin_unlock(&robj->tobj.lock);
    if (ptr) {
        *ptr = robj->kptr;
    }
    return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
    spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
        spin_unlock(&robj->tobj.lock);
        return;
    }
    robj->kptr = NULL;
    spin_unlock(&robj->tobj.lock);
    ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
    struct ttm_buffer_object *tobj;

    if ((*robj) == NULL) {
        return;
    }
    tobj = &((*robj)->tobj);
    ttm_bo_unref(&tobj);
    if (tobj == NULL) {
        *robj = NULL;
    }
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
    *offset = robj->tobj.addr_space_offset;
    return 0;
}

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    void *fbptr;
    int r;

    flags = radeon_object_flags_from_domain(domain);
    spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
        spin_unlock(&robj->tobj.lock);
        return 0;
    }
    spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for pinning.\n");
        return r;
    }
    if (robj->rdev->fbdev_robj == robj) {
        mutex_lock(&robj->rdev->fbdev_info->lock);
        radeon_object_kunmap(robj);
    }
    tmp = robj->tobj.mem.placement;
    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    radeon_object_gpu_addr(robj);
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }
    radeon_object_unreserve(robj);
    if (robj->rdev->fbdev_robj == robj) {
        if (!r) {
            r = radeon_object_kmap(robj, &fbptr);
        }
        if (!r) {
            robj->rdev->fbdev_info->screen_base = fbptr;
            robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
        }
        mutex_unlock(&robj->rdev->fbdev_info->lock);
    }
    return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    void *fbptr;
    int r;

    spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
        spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
        spin_unlock(&robj->tobj.lock);
        return;
    }
    spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning.\n");
        return;
    }
    if (robj->rdev->fbdev_robj == robj) {
        mutex_lock(&robj->rdev->fbdev_info->lock);
        radeon_object_kunmap(robj);
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
    if (robj->rdev->fbdev_robj == robj) {
        if (!r) {
            r = radeon_object_kmap(robj, &fbptr);
        }
        if (!r) {
            robj->rdev->fbdev_info->screen_base = fbptr;
            robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
        }
        mutex_unlock(&robj->rdev->fbdev_info->lock);
    }
}
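/* Editorial note: pin/unpin are refcounted and must be balanced. A minimal
 * sketch, assuming a buffer that should stay in VRAM while it is scanned out:
 *
 *    uint64_t gpu_addr;
 *
 *    r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *    ...program the hardware with gpu_addr...
 *    radeon_object_unpin(robj);
 */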

int radeon_object_wait(struct radeon_object *robj)
{
    int r = 0;

    /* FIXME: should use block reservation instead */
    r = radeon_object_reserve(robj, true);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for waiting.\n");
        return r;
    }
    spin_lock(&robj->tobj.lock);
    if (robj->tobj.sync_obj) {
        r = ttm_bo_wait(&robj->tobj, true, false, false);
    }
    spin_unlock(&robj->tobj.lock);
    radeon_object_unreserve(robj);
    return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
    if (rdev->flags & RADEON_IS_IGP) {
        /* Useless to evict on IGP chips */
        return 0;
    }
    return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
    struct radeon_object *robj, *n;
    struct drm_gem_object *gobj;

    if (list_empty(&rdev->gem.objects)) {
        return;
    }
    DRM_ERROR("Userspace still has active objects!\n");
    list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
        mutex_lock(&rdev->ddev->struct_mutex);
        gobj = robj->gobj;
        DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
                  gobj, robj, (unsigned long)gobj->size,
                  *((unsigned long *)&gobj->refcount));
        list_del_init(&robj->list);
        radeon_object_unref(&robj);
        gobj->driver_private = NULL;
        drm_gem_object_unreference(gobj);
        mutex_unlock(&rdev->ddev->struct_mutex);
    }
}

int radeon_object_init(struct radeon_device *rdev)
{
    return radeon_ttm_init(rdev);
}

void radeon_object_fini(struct radeon_device *rdev)
{
    radeon_ttm_fini(rdev);
}

void radeon_object_list_add_object(struct radeon_object_list *lobj,
                                   struct list_head *head)
{
    if (lobj->wdomain) {
        list_add(&lobj->list, head);
    } else {
        list_add_tail(&lobj->list, head);
    }
}

int radeon_object_list_reserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;
    int r;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            r = radeon_object_reserve(lobj->robj, true);
            if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object.\n");
                return r;
            }
        }
    }
    return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            radeon_object_unreserve(lobj->robj);
        }
    }
}

int radeon_object_list_validate(struct list_head *head, void *fence)
{
    struct radeon_object_list *lobj;
    struct radeon_object *robj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;
    uint32_t flags;
    int r;

    r = radeon_object_list_reserve(head);
    if (unlikely(r != 0)) {
        radeon_object_list_unreserve(head);
        return r;
    }
    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        robj = lobj->robj;
        if (lobj->wdomain) {
            flags = radeon_object_flags_from_domain(lobj->wdomain);
            flags |= TTM_PL_FLAG_TT;
        } else {
            flags = radeon_object_flags_from_domain(lobj->rdomain);
            flags |= TTM_PL_FLAG_TT;
            flags |= TTM_PL_FLAG_VRAM;
        }
        if (!robj->pin_count) {
            robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
            r = ttm_buffer_object_validate(&robj->tobj,
                                           robj->tobj.proposed_placement,
                                           true, false);
            if (unlikely(r)) {
                radeon_object_list_unreserve(head);
                DRM_ERROR("radeon: failed to validate.\n");
                return r;
            }
            radeon_object_gpu_addr(robj);
        }
        lobj->gpu_offset = robj->gpu_addr;
        if (fence) {
            old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
            robj->tobj.sync_obj = radeon_fence_ref(fence);
            robj->tobj.sync_obj_arg = NULL;
        }
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
        lobj->robj->tobj.sync_obj = NULL;
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
    radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
                             struct vm_area_struct *vma)
{
    return ttm_fbdev_mmap(vma, &robj->tobj);
}

unsigned long radeon_object_size(struct radeon_object *robj)
{
    return robj->tobj.num_pages << PAGE_SHIFT;
}
@@ -0,0 +1,45 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>

/*
 * TTM.
 */
struct radeon_mman {
    struct ttm_global_reference mem_global_ref;
    bool mem_global_referenced;
    struct ttm_bo_device bdev;
};

#endif
File diff suppressed because it is too large
@@ -0,0 +1,485 @@
/*
|
||||
* Copyright 2008 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
* Copyright 2009 Jerome Glisse.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
* Jerome Glisse
|
||||
*/
|
||||
#include <linux/seq_file.h>
|
||||
#include "drmP.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "atom.h"
|
||||
|
||||
int radeon_debugfs_ib_init(struct radeon_device *rdev);
|
||||
|
||||
/*
|
||||
* IB.
|
||||
*/
|
||||
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
|
||||
{
|
||||
struct radeon_fence *fence;
|
||||
struct radeon_ib *nib;
|
||||
unsigned long i;
|
||||
int r = 0;
|
||||
|
||||
*ib = NULL;
|
||||
r = radeon_fence_create(rdev, &fence);
|
||||
if (r) {
|
||||
DRM_ERROR("failed to create fence for new IB\n");
|
||||
return r;
|
||||
}
|
||||
mutex_lock(&rdev->ib_pool.mutex);
|
||||
i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
|
||||
if (i < RADEON_IB_POOL_SIZE) {
|
||||
set_bit(i, rdev->ib_pool.alloc_bm);
|
||||
rdev->ib_pool.ibs[i].length_dw = 0;
|
||||
*ib = &rdev->ib_pool.ibs[i];
|
||||
goto out;
|
||||
}
|
||||
if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
|
||||
/* we go do nothings here */
|
||||
DRM_ERROR("all IB allocated none scheduled.\n");
|
||||
r = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/* get the first ib on the scheduled list */
|
||||
nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
|
||||
struct radeon_ib, list);
|
||||
if (nib->fence == NULL) {
|
||||
/* we go do nothings here */
|
||||
DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
|
||||
r = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
r = radeon_fence_wait(nib->fence, false);
|
||||
if (r) {
|
||||
DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
|
||||
(unsigned long)nib->gpu_addr, nib->length_dw);
|
||||
DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
|
||||
goto out;
|
||||
}
|
||||
radeon_fence_unref(&nib->fence);
|
||||
nib->length_dw = 0;
|
||||
list_del(&nib->list);
|
||||
INIT_LIST_HEAD(&nib->list);
|
||||
*ib = nib;
|
||||
out:
|
||||
mutex_unlock(&rdev->ib_pool.mutex);
|
||||
if (r) {
|
||||
radeon_fence_unref(&fence);
|
||||
} else {
|
||||
(*ib)->fence = fence;
|
||||
}
|
||||
return r;
|
||||
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
    struct radeon_ib *tmp = *ib;

    *ib = NULL;
    if (tmp == NULL) {
        return;
    }
    mutex_lock(&rdev->ib_pool.mutex);
    if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
        /* IB is scheduled & not signaled, don't do anything */
        mutex_unlock(&rdev->ib_pool.mutex);
        return;
    }
    list_del(&tmp->list);
    INIT_LIST_HEAD(&tmp->list);
    if (tmp->fence) {
        radeon_fence_unref(&tmp->fence);
    }
    tmp->length_dw = 0;
    clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
    mutex_unlock(&rdev->ib_pool.mutex);
}

static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
{
    while ((ib->length_dw & rdev->cp.align_mask)) {
        ib->ptr[ib->length_dw++] = PACKET2(0);
    }
}

static void radeon_ib_cpu_flush(struct radeon_device *rdev,
                                struct radeon_ib *ib)
{
    unsigned long tmp;
    unsigned i;

    /* Forcing a CPU cache flush this way is ugly, but seems reliable. */
    for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
        tmp = readl(&ib->ptr[i]);
    }
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
    int r = 0;

    mutex_lock(&rdev->ib_pool.mutex);
    radeon_ib_align(rdev, ib);
    radeon_ib_cpu_flush(rdev, ib);
    if (!ib->length_dw || !rdev->cp.ready) {
        /* TODO: Nothing in the IB, we should report this. */
        mutex_unlock(&rdev->ib_pool.mutex);
        DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
        return -EINVAL;
    }
    /* 64 dwords should be enough for the fence too */
    r = radeon_ring_lock(rdev, 64);
    if (r) {
        DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
        mutex_unlock(&rdev->ib_pool.mutex);
        return r;
    }
    radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
    radeon_ring_write(rdev, ib->gpu_addr);
    radeon_ring_write(rdev, ib->length_dw);
    radeon_fence_emit(rdev, ib->fence);
    radeon_ring_unlock_commit(rdev);
    list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
    mutex_unlock(&rdev->ib_pool.mutex);
    return 0;
}
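
/*
 * The three ring writes above form a single type-0 packet: PACKET0 with a
 * count of 1 writes two consecutive registers starting at RADEON_CP_IB_BASE,
 * i.e. the IB's GPU address followed by its size in dwords, which is what
 * kicks the CP into fetching the indirect buffer.
 */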

int radeon_ib_pool_init(struct radeon_device *rdev)
{
    void *ptr;
    uint64_t gpu_addr;
    int i;
    int r = 0;

    /* Allocate a 1M object buffer */
    INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
    r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
                             true, RADEON_GEM_DOMAIN_GTT,
                             false, &rdev->ib_pool.robj);
    if (r) {
        DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
        return r;
    }
    r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
    if (r) {
        DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
        return r;
    }
    r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
    if (r) {
        DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
        return r;
    }
    for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
        unsigned offset;

        offset = i * 64 * 1024;
        rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
        rdev->ib_pool.ibs[i].ptr = ptr + offset;
        rdev->ib_pool.ibs[i].idx = i;
        rdev->ib_pool.ibs[i].length_dw = 0;
        INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
    }
    bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
    rdev->ib_pool.ready = true;
    DRM_INFO("radeon: ib pool ready.\n");
    if (radeon_debugfs_ib_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for IB !\n");
    }
    return r;
}
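
/*
 * Pool layout: one pinned GTT object holds RADEON_IB_POOL_SIZE slots of
 * 64KB each, so slot i lives at byte offset i * 64 * 1024 from both the
 * CPU mapping (ptr) and the GPU base address (gpu_addr) of the object.
 * With a pool size of 16 that matches the 1M figure in the comment above.
 */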

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
    if (!rdev->ib_pool.ready) {
        return;
    }
    mutex_lock(&rdev->ib_pool.mutex);
    bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
    if (rdev->ib_pool.robj) {
        radeon_object_kunmap(rdev->ib_pool.robj);
        radeon_object_unref(&rdev->ib_pool.robj);
        rdev->ib_pool.robj = NULL;
    }
    mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_test(struct radeon_device *rdev)
{
    struct radeon_ib *ib;
    uint32_t scratch;
    uint32_t tmp = 0;
    unsigned i;
    int r;

    r = radeon_scratch_get(rdev, &scratch);
    if (r) {
        DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
        return r;
    }
    WREG32(scratch, 0xCAFEDEAD);
    r = radeon_ib_get(rdev, &ib);
    if (r) {
        return r;
    }
    ib->ptr[0] = PACKET0(scratch, 0);
    ib->ptr[1] = 0xDEADBEEF;
    ib->ptr[2] = PACKET2(0);
    ib->ptr[3] = PACKET2(0);
    ib->ptr[4] = PACKET2(0);
    ib->ptr[5] = PACKET2(0);
    ib->ptr[6] = PACKET2(0);
    ib->ptr[7] = PACKET2(0);
    ib->length_dw = 8;
    r = radeon_ib_schedule(rdev, ib);
    if (r) {
        radeon_scratch_free(rdev, scratch);
        radeon_ib_free(rdev, &ib);
        return r;
    }
    r = radeon_fence_wait(ib->fence, false);
    if (r) {
        return r;
    }
    for (i = 0; i < rdev->usec_timeout; i++) {
        tmp = RREG32(scratch);
        if (tmp == 0xDEADBEEF) {
            break;
        }
        DRM_UDELAY(1);
    }
    if (i < rdev->usec_timeout) {
        DRM_INFO("ib test succeeded in %u usecs\n", i);
    } else {
        DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
                  scratch, tmp);
        r = -EINVAL;
    }
    radeon_scratch_free(rdev, scratch);
    radeon_ib_free(rdev, &ib);
    return r;
}
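
/*
 * The IB test is a simple smoke test: seed a scratch register with
 * 0xCAFEDEAD, submit an 8-dword IB whose first packet (PACKET0 with a
 * count of 0, i.e. a single register) writes 0xDEADBEEF to it, padded
 * out with PACKET2 no-ops, then poll until the new value lands or
 * usec_timeout expires.
 */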

/*
 * Ring.
 */
void radeon_ring_free_size(struct radeon_device *rdev)
{
    rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
    /* This works because ring_size is a power of 2 */
    rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
    rdev->cp.ring_free_dw -= rdev->cp.wptr;
    rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
    if (!rdev->cp.ring_free_dw) {
        rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
    }
}
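
/*
 * Worked example of the mask arithmetic above, assuming a 64KB ring:
 * ring_size / 4 = 16384 dwords, so ptr_mask = 0x3fff. With rptr = 100
 * and wptr = 200, free = (100 + 16384 - 200) & 0x3fff = 16284 dwords;
 * adding ring_size / 4 before subtracting keeps the intermediate value
 * positive, and the mask folds it back into ring range. A result of 0
 * is the completely-empty case (rptr == wptr), hence the fixup.
 */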

int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
    int r;

    /* Align requested size with padding so unlock_commit can
     * pad safely */
    ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
    mutex_lock(&rdev->cp.mutex);
    while (ndw > (rdev->cp.ring_free_dw - 1)) {
        radeon_ring_free_size(rdev);
        if (ndw < rdev->cp.ring_free_dw) {
            break;
        }
        r = radeon_fence_wait_next(rdev);
        if (r) {
            mutex_unlock(&rdev->cp.mutex);
            return r;
        }
    }
    rdev->cp.count_dw = ndw;
    rdev->cp.wptr_old = rdev->cp.wptr;
    return 0;
}
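
/*
 * The round-up on entry is the usual power-of-two trick: with an
 * align_mask of 15 (16-dword fetch granularity), a request for 10 dwords
 * becomes (10 + 15) & ~15 = 16, so the commit path can always pad the
 * tail with no-ops without overrunning the space that was reserved.
 */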

void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
    unsigned count_dw_pad;
    unsigned i;

    /* We pad to match fetch size */
    count_dw_pad = (rdev->cp.align_mask + 1) -
                   (rdev->cp.wptr & rdev->cp.align_mask);
    for (i = 0; i < count_dw_pad; i++) {
        radeon_ring_write(rdev, PACKET2(0));
    }
    DRM_MEMORYBARRIER();
    WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
    (void)RREG32(RADEON_CP_RB_WPTR);
    mutex_unlock(&rdev->cp.mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
    rdev->cp.wptr = rdev->cp.wptr_old;
    mutex_unlock(&rdev->cp.mutex);
}

int radeon_ring_test(struct radeon_device *rdev)
{
    uint32_t scratch;
    uint32_t tmp = 0;
    unsigned i;
    int r;

    r = radeon_scratch_get(rdev, &scratch);
    if (r) {
        DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
        return r;
    }
    WREG32(scratch, 0xCAFEDEAD);
    r = radeon_ring_lock(rdev, 2);
    if (r) {
        DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
        radeon_scratch_free(rdev, scratch);
        return r;
    }
    radeon_ring_write(rdev, PACKET0(scratch, 0));
    radeon_ring_write(rdev, 0xDEADBEEF);
    radeon_ring_unlock_commit(rdev);
    for (i = 0; i < rdev->usec_timeout; i++) {
        tmp = RREG32(scratch);
        if (tmp == 0xDEADBEEF) {
            break;
        }
        DRM_UDELAY(1);
    }
    if (i < rdev->usec_timeout) {
        DRM_INFO("ring test succeeded in %d usecs\n", i);
    } else {
        DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
                  scratch, tmp);
        r = -EINVAL;
    }
    radeon_scratch_free(rdev, scratch);
    return r;
}

int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
    int r;

    rdev->cp.ring_size = ring_size;
    /* Allocate ring buffer */
    if (rdev->cp.ring_obj == NULL) {
        r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
                                 true,
                                 RADEON_GEM_DOMAIN_GTT,
                                 false,
                                 &rdev->cp.ring_obj);
        if (r) {
            DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
            mutex_unlock(&rdev->cp.mutex);
            return r;
        }
        r = radeon_object_pin(rdev->cp.ring_obj,
                              RADEON_GEM_DOMAIN_GTT,
                              &rdev->cp.gpu_addr);
        if (r) {
            DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
            mutex_unlock(&rdev->cp.mutex);
            return r;
        }
        r = radeon_object_kmap(rdev->cp.ring_obj,
                               (void **)&rdev->cp.ring);
        if (r) {
            DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
            mutex_unlock(&rdev->cp.mutex);
            return r;
        }
    }
    rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
    rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
    return 0;
}

void radeon_ring_fini(struct radeon_device *rdev)
{
    mutex_lock(&rdev->cp.mutex);
    if (rdev->cp.ring_obj) {
        radeon_object_kunmap(rdev->cp.ring_obj);
        radeon_object_unpin(rdev->cp.ring_obj);
        radeon_object_unref(&rdev->cp.ring_obj);
        rdev->cp.ring = NULL;
        rdev->cp.ring_obj = NULL;
    }
    mutex_unlock(&rdev->cp.mutex);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct radeon_ib *ib = node->info_ent->data;
    unsigned i;

    if (ib == NULL) {
        return 0;
    }
    seq_printf(m, "IB %04lu\n", ib->idx);
    seq_printf(m, "IB fence %p\n", ib->fence);
    seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
    for (i = 0; i < ib->length_dw; i++) {
        seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
    }
    return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    unsigned i;

    for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
        sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
        radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
        radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
        radeon_debugfs_ib_list[i].driver_features = 0;
        radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
    }
    return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
                                    RADEON_IB_POOL_SIZE);
#else
    return 0;
#endif
}
@@ -0,0 +1,653 @@
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
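
/*
 * DRM_FILE_PAGE_OFFSET splits the per-file mmap offset space: offsets are
 * in pages, so everything below the 4GB mark is left to the legacy
 * drm_mmap() path, while offsets at or above it are routed to TTM buffer
 * objects (see radeon_mmap() below).
 */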

static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
    struct radeon_mman *mman;
    struct radeon_device *rdev;

    mman = container_of(bdev, struct radeon_mman, bdev);
    rdev = container_of(mman, struct radeon_device, mman);
    return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
    return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
    ttm_mem_global_release(ref->object);
}

static int radeon_ttm_global_init(struct radeon_device *rdev)
{
    struct ttm_global_reference *global_ref;
    int r;

    rdev->mman.mem_global_referenced = false;
    global_ref = &rdev->mman.mem_global_ref;
    global_ref->global_type = TTM_GLOBAL_TTM_MEM;
    global_ref->size = sizeof(struct ttm_mem_global);
    global_ref->init = &radeon_ttm_mem_global_init;
    global_ref->release = &radeon_ttm_mem_global_release;
    r = ttm_global_item_ref(global_ref);
    if (r != 0) {
        DRM_ERROR("Failed referencing a global TTM memory object.\n");
        return r;
    }
    rdev->mman.mem_global_referenced = true;
    return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
    if (rdev->mman.mem_global_referenced) {
        ttm_global_item_unref(&rdev->mman.mem_global_ref);
        rdev->mman.mem_global_referenced = false;
    }
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
    struct radeon_device *rdev;

    rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
    if (rdev->flags & RADEON_IS_AGP) {
        return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
    } else
#endif
    {
        return radeon_ttm_backend_create(rdev);
    }
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
    return 0;
}

static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                struct ttm_mem_type_manager *man)
{
    struct radeon_device *rdev;

    rdev = radeon_get_rdev(bdev);

    switch (type) {
    case TTM_PL_SYSTEM:
        /* System memory */
        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
        break;
    case TTM_PL_TT:
        man->gpu_offset = 0;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP) {
            if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
                DRM_ERROR("AGP is not enabled for memory type %u\n",
                          (unsigned)type);
                return -EINVAL;
            }
            man->io_offset = rdev->mc.agp_base;
            man->io_size = rdev->mc.gtt_size;
            man->io_addr = NULL;
            man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                         TTM_MEMTYPE_FLAG_MAPPABLE;
            man->available_caching = TTM_PL_FLAG_UNCACHED |
                                     TTM_PL_FLAG_WC;
            man->default_caching = TTM_PL_FLAG_WC;
        } else
#endif
        {
            man->io_offset = 0;
            man->io_size = 0;
            man->io_addr = NULL;
            man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                         TTM_MEMTYPE_FLAG_CMA;
        }
        break;
    case TTM_PL_VRAM:
        /* "On-card" video ram */
        man->gpu_offset = 0;
        man->flags = TTM_MEMTYPE_FLAG_FIXED |
                     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                     TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
        man->default_caching = TTM_PL_FLAG_WC;
        man->io_addr = NULL;
        man->io_offset = rdev->mc.aper_base;
        man->io_size = rdev->mc.aper_size;
        break;
    default:
        DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
        return -EINVAL;
    }
    return 0;
}
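
/*
 * Three placement domains come out of the switch above: SYSTEM (plain
 * cached pages, no GPU mapping), TT (pages made GPU-visible through the
 * GART, or through the AGP aperture with write-combining when AGP is in
 * use), and VRAM (the fixed on-card range behind the PCI aperture, mapped
 * uncached or write-combined).
 */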

static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
{
    uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;

    switch (bo->mem.mem_type) {
    default:
        return (cur_placement & ~TTM_PL_MASK_CACHING) |
               TTM_PL_FLAG_SYSTEM |
               TTM_PL_FLAG_CACHED;
    }
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
    return 0;
}

static void radeon_move_null(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *new_mem)
{
    struct ttm_mem_reg *old_mem = &bo->mem;

    BUG_ON(old_mem->mm_node != NULL);
    *old_mem = *new_mem;
    new_mem->mm_node = NULL;
}

static int radeon_move_blit(struct ttm_buffer_object *bo,
                            bool evict, int no_wait,
                            struct ttm_mem_reg *new_mem,
                            struct ttm_mem_reg *old_mem)
{
    struct radeon_device *rdev;
    uint64_t old_start, new_start;
    struct radeon_fence *fence;
    int r;

    rdev = radeon_get_rdev(bo->bdev);
    r = radeon_fence_create(rdev, &fence);
    if (unlikely(r)) {
        return r;
    }
    old_start = old_mem->mm_node->start << PAGE_SHIFT;
    new_start = new_mem->mm_node->start << PAGE_SHIFT;

    switch (old_mem->mem_type) {
    case TTM_PL_VRAM:
        old_start += rdev->mc.vram_location;
        break;
    case TTM_PL_TT:
        old_start += rdev->mc.gtt_location;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
        return -EINVAL;
    }
    switch (new_mem->mem_type) {
    case TTM_PL_VRAM:
        new_start += rdev->mc.vram_location;
        break;
    case TTM_PL_TT:
        new_start += rdev->mc.gtt_location;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
        return -EINVAL;
    }
    if (!rdev->cp.ready) {
        DRM_ERROR("Trying to move memory with CP turned off.\n");
        return -EINVAL;
    }
    r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
    /* FIXME: handle copy error */
    r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                  evict, no_wait, new_mem);
    radeon_fence_unref(&fence);
    return r;
}

static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible, bool no_wait,
                                struct ttm_mem_reg *new_mem)
{
    struct radeon_device *rdev;
    struct ttm_mem_reg *old_mem = &bo->mem;
    struct ttm_mem_reg tmp_mem;
    uint32_t proposed_placement;
    int r;

    rdev = radeon_get_rdev(bo->bdev);
    tmp_mem = *new_mem;
    tmp_mem.mm_node = NULL;
    proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
    r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
                         interruptible, no_wait);
    if (unlikely(r)) {
        return r;
    }
    r = ttm_tt_bind(bo->ttm, &tmp_mem);
    if (unlikely(r)) {
        goto out_cleanup;
    }
    r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
    if (unlikely(r)) {
        goto out_cleanup;
    }
    r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
    if (tmp_mem.mm_node) {
        spin_lock(&rdev->mman.bdev.lru_lock);
        drm_mm_put_block(tmp_mem.mm_node);
        spin_unlock(&rdev->mman.bdev.lru_lock);
        return r;
    }
    return r;
}

static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible, bool no_wait,
                                struct ttm_mem_reg *new_mem)
{
    struct radeon_device *rdev;
    struct ttm_mem_reg *old_mem = &bo->mem;
    struct ttm_mem_reg tmp_mem;
    uint32_t proposed_flags;
    int r;

    rdev = radeon_get_rdev(bo->bdev);
    tmp_mem = *new_mem;
    tmp_mem.mm_node = NULL;
    proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
    r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
                         interruptible, no_wait);
    if (unlikely(r)) {
        return r;
    }
    r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
    if (unlikely(r)) {
        goto out_cleanup;
    }
    r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
    if (unlikely(r)) {
        goto out_cleanup;
    }
out_cleanup:
    if (tmp_mem.mm_node) {
        spin_lock(&rdev->mman.bdev.lru_lock);
        drm_mm_put_block(tmp_mem.mm_node);
        spin_unlock(&rdev->mman.bdev.lru_lock);
        return r;
    }
    return r;
}

static int radeon_bo_move(struct ttm_buffer_object *bo,
                          bool evict, bool interruptible, bool no_wait,
                          struct ttm_mem_reg *new_mem)
{
    struct radeon_device *rdev;
    struct ttm_mem_reg *old_mem = &bo->mem;
    int r;

    rdev = radeon_get_rdev(bo->bdev);
    if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
        radeon_move_null(bo, new_mem);
        return 0;
    }
    if ((old_mem->mem_type == TTM_PL_TT &&
         new_mem->mem_type == TTM_PL_SYSTEM) ||
        (old_mem->mem_type == TTM_PL_SYSTEM &&
         new_mem->mem_type == TTM_PL_TT)) {
        /* a bind is enough */
        radeon_move_null(bo, new_mem);
        return 0;
    }
    if (!rdev->cp.ready) {
        /* use memcpy */
        DRM_ERROR("CP is not ready, using memcpy.\n");
        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
    }

    if (old_mem->mem_type == TTM_PL_VRAM &&
        new_mem->mem_type == TTM_PL_SYSTEM) {
        return radeon_move_vram_ram(bo, evict, interruptible,
                                    no_wait, new_mem);
    } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
               new_mem->mem_type == TTM_PL_VRAM) {
        return radeon_move_ram_vram(bo, evict, interruptible,
                                    no_wait, new_mem);
    } else {
        r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
        if (unlikely(r)) {
            return r;
        }
    }
    return r;
}

const uint32_t radeon_mem_prios[] = {
    TTM_PL_VRAM,
    TTM_PL_TT,
    TTM_PL_SYSTEM,
};

const uint32_t radeon_busy_prios[] = {
    TTM_PL_TT,
    TTM_PL_VRAM,
    TTM_PL_SYSTEM,
};

static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
{
    return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
    return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
    radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
    return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
    return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
    .mem_type_prio = radeon_mem_prios,
    .mem_busy_prio = radeon_busy_prios,
    .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
    .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
    .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
    .invalidate_caches = &radeon_invalidate_caches,
    .init_mem_type = &radeon_init_mem_type,
    .evict_flags = &radeon_evict_flags,
    .move = &radeon_bo_move,
    .verify_access = &radeon_verify_access,
    .sync_obj_signaled = &radeon_sync_obj_signaled,
    .sync_obj_wait = &radeon_sync_obj_wait,
    .sync_obj_flush = &radeon_sync_obj_flush,
    .sync_obj_unref = &radeon_sync_obj_unref,
    .sync_obj_ref = &radeon_sync_obj_ref,
};

int radeon_ttm_init(struct radeon_device *rdev)
{
    int r;

    r = radeon_ttm_global_init(rdev);
    if (r) {
        return r;
    }
    /* No other user of the address space, so set it to 0 */
    r = ttm_bo_device_init(&rdev->mman.bdev,
                           rdev->mman.mem_global_ref.object,
                           &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
    if (r) {
        DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
        return r;
    }
    r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
                       ((rdev->mc.aper_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }
    r = radeon_object_create(rdev, NULL, 256 * 1024, true,
                             RADEON_GEM_DOMAIN_VRAM, false,
                             &rdev->stollen_vga_memory);
    if (r) {
        return r;
    }
    r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
    if (r) {
        radeon_object_unref(&rdev->stollen_vga_memory);
        return r;
    }
    DRM_INFO("radeon: %uM of VRAM memory ready\n",
             rdev->mc.vram_size / (1024 * 1024));
    r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
                       ((rdev->mc.gtt_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }
    DRM_INFO("radeon: %uM of GTT memory ready.\n",
             rdev->mc.gtt_size / (1024 * 1024));
    if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
        rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
    }
    return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
    if (rdev->stollen_vga_memory) {
        radeon_object_unpin(rdev->stollen_vga_memory);
        radeon_object_unref(&rdev->stollen_vga_memory);
    }
    ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
    ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
    ttm_bo_device_release(&rdev->mman.bdev);
    radeon_gart_fini(rdev);
    radeon_ttm_global_fini(rdev);
    DRM_INFO("radeon: ttm finalized\n");
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct ttm_buffer_object *bo;
    int r;

    bo = (struct ttm_buffer_object *)vma->vm_private_data;
    if (bo == NULL) {
        return VM_FAULT_NOPAGE;
    }
    r = ttm_vm_ops->fault(vma, vmf);
    return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_file *file_priv;
    struct radeon_device *rdev;
    int r;

    if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
        return drm_mmap(filp, vma);
    }

    file_priv = (struct drm_file *)filp->private_data;
    rdev = file_priv->minor->dev->dev_private;
    if (rdev == NULL) {
        return -EINVAL;
    }
    r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
    if (unlikely(r != 0)) {
        return r;
    }
    if (unlikely(ttm_vm_ops == NULL)) {
        ttm_vm_ops = vma->vm_ops;
        radeon_ttm_vm_ops = *ttm_vm_ops;
        radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
    }
    vma->vm_ops = &radeon_ttm_vm_ops;
    return 0;
}
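
/*
 * vm_ops interposition: the first TTM-backed mmap caches the vm_ops that
 * ttm_bo_mmap() installed, copies them into radeon_ttm_vm_ops, and swaps
 * in radeon_ttm_fault, which simply guards against a NULL buffer object
 * before delegating to TTM's own fault handler.
 */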


/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
    struct ttm_backend backend;
    struct radeon_device *rdev;
    unsigned long num_pages;
    struct page **pages;
    struct page *dummy_read_page;
    bool populated;
    bool bound;
    unsigned offset;
};

static int radeon_ttm_backend_populate(struct ttm_backend *backend,
                                       unsigned long num_pages,
                                       struct page **pages,
                                       struct page *dummy_read_page)
{
    struct radeon_ttm_backend *gtt;

    gtt = container_of(backend, struct radeon_ttm_backend, backend);
    gtt->pages = pages;
    gtt->num_pages = num_pages;
    gtt->dummy_read_page = dummy_read_page;
    gtt->populated = true;
    return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
    struct radeon_ttm_backend *gtt;

    gtt = container_of(backend, struct radeon_ttm_backend, backend);
    gtt->pages = NULL;
    gtt->num_pages = 0;
    gtt->dummy_read_page = NULL;
    gtt->populated = false;
    gtt->bound = false;
}


static int radeon_ttm_backend_bind(struct ttm_backend *backend,
                                   struct ttm_mem_reg *bo_mem)
{
    struct radeon_ttm_backend *gtt;
    int r;

    gtt = container_of(backend, struct radeon_ttm_backend, backend);
    gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
    if (!gtt->num_pages) {
        WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
    }
    r = radeon_gart_bind(gtt->rdev, gtt->offset,
                         gtt->num_pages, gtt->pages);
    if (r) {
        DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                  gtt->num_pages, gtt->offset);
        return r;
    }
    gtt->bound = true;
    return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
    struct radeon_ttm_backend *gtt;

    gtt = container_of(backend, struct radeon_ttm_backend, backend);
    radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
    gtt->bound = false;
    return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
    struct radeon_ttm_backend *gtt;

    gtt = container_of(backend, struct radeon_ttm_backend, backend);
    if (gtt->bound) {
        radeon_ttm_backend_unbind(backend);
    }
    kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
    .populate = &radeon_ttm_backend_populate,
    .clear = &radeon_ttm_backend_clear,
    .bind = &radeon_ttm_backend_bind,
    .unbind = &radeon_ttm_backend_unbind,
    .destroy = &radeon_ttm_backend_destroy,
};

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
    struct radeon_ttm_backend *gtt;

    gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
    if (gtt == NULL) {
        return NULL;
    }
    gtt->backend.bdev = &rdev->mman.bdev;
    gtt->backend.flags = 0;
    gtt->backend.func = &radeon_backend_func;
    gtt->rdev = rdev;
    gtt->pages = NULL;
    gtt->num_pages = 0;
    gtt->dummy_read_page = NULL;
    gtt->populated = false;
    gtt->bound = false;
    return &gtt->backend;
}
@@ -0,0 +1,411 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"

/* rs400,rs480 depends on : */
void r100_hdp_reset(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rs400,rs480
 *
 * Some of these functions might be used by newer ASICs.
 */
void rs400_gpu_init(struct radeon_device *rdev);
int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);


/*
 * GART functions.
 */
void rs400_gart_adjust_size(struct radeon_device *rdev)
{
    /* Check gart size */
    switch (rdev->mc.gtt_size/(1024*1024)) {
    case 32:
    case 64:
    case 128:
    case 256:
    case 512:
    case 1024:
    case 2048:
        break;
    default:
        DRM_ERROR("Unable to use IGP GART size %uM\n",
                  rdev->mc.gtt_size >> 20);
        DRM_ERROR("Valid GART sizes for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
        DRM_ERROR("Forcing to 32M GART size\n");
        rdev->mc.gtt_size = 32 * 1024 * 1024;
        return;
    }
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
        /* FIXME: RS400 & RS480 seem to have an issue with GART size
         * if 4G of system memory (needs more testing) */
        rdev->mc.gtt_size = 32 * 1024 * 1024;
        DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
    }
}

void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
    uint32_t tmp;
    unsigned int timeout = rdev->usec_timeout;

    WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
    do {
        tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
        if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
            break;
        DRM_UDELAY(1);
        timeout--;
    } while (timeout > 0);
    WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

int rs400_gart_enable(struct radeon_device *rdev)
{
    uint32_t size_reg;
    uint32_t tmp;
    int r;

    /* Initialize common gart structure */
    r = radeon_gart_init(rdev);
    if (r) {
        return r;
    }
    if (rs400_debugfs_pcie_gart_info_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
    }

    tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
    tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
    WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
    /* Check gart size */
    switch (rdev->mc.gtt_size / (1024 * 1024)) {
    case 32:
        size_reg = RS480_VA_SIZE_32MB;
        break;
    case 64:
        size_reg = RS480_VA_SIZE_64MB;
        break;
    case 128:
        size_reg = RS480_VA_SIZE_128MB;
        break;
    case 256:
        size_reg = RS480_VA_SIZE_256MB;
        break;
    case 512:
        size_reg = RS480_VA_SIZE_512MB;
        break;
    case 1024:
        size_reg = RS480_VA_SIZE_1GB;
        break;
    case 2048:
        size_reg = RS480_VA_SIZE_2GB;
        break;
    default:
        return -EINVAL;
    }
    if (rdev->gart.table.ram.ptr == NULL) {
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        r = radeon_gart_table_ram_alloc(rdev);
        if (r) {
            return r;
        }
    }
    /* It should be fine to program it to max value */
    if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
        WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
        WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
    } else {
        WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
        WREG32(RS480_AGP_BASE_2, 0);
    }
    tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
    tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
    tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
    if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
        WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
        tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
        WREG32(RADEON_BUS_CNTL, tmp);
    } else {
        WREG32(RADEON_MC_AGP_LOCATION, tmp);
        tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
        WREG32(RADEON_BUS_CNTL, tmp);
    }
    /* Table should be in 32bit address space, so ignore bits above. */
    tmp = rdev->gart.table_addr & 0xfffff000;
    WREG32_MC(RS480_GART_BASE, tmp);
    /* TODO: more tweaking here */
    WREG32_MC(RS480_GART_FEATURE_ID,
              (RS480_TLB_ENABLE |
               RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
    /* Disable snooping */
    WREG32_MC(RS480_AGP_MODE_CNTL,
              (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
    /* Disable AGP mode */
    /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
     * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
    if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
        WREG32_MC(RS480_MC_MISC_CNTL,
                  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
    } else {
        WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
    }
    /* Enable gart */
    WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
    rs400_gart_tlb_flush(rdev);
    rdev->gart.ready = true;
    return 0;
}

void rs400_gart_disable(struct radeon_device *rdev)
{
    uint32_t tmp;

    tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
    tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
    WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
    WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
    if (i < 0 || i > rdev->gart.num_gpu_pages) {
        return -EINVAL;
    }
    rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC);
    return 0;
}
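
/*
 * rs400 GART entries are 4 bytes each (note table_size = num_gpu_pages * 4
 * in rs400_gart_enable() above): a little-endian 32-bit system page
 * address with the constant 0xC ORed into the low nibble (presumably
 * enable/valid flag bits). For example, a page at 0x12345000 is stored
 * as 0x1234500C.
 */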


/*
 * MC functions.
 */
int rs400_mc_init(struct radeon_device *rdev)
{
    uint32_t tmp;
    int r;

    if (r100_debugfs_rbbm_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for RBBM !\n");
    }

    rs400_gpu_init(rdev);
    rs400_gart_disable(rdev);
    rdev->mc.gtt_location = rdev->mc.vram_size;
    rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
    rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
    rdev->mc.vram_location = 0xFFFFFFFFUL;
    r = radeon_mc_setup(rdev);
    if (r) {
        return r;
    }

    r100_mc_disable_clients(rdev);
    if (r300_mc_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait MC idle while "
               "programming pipes. Bad things might happen.\n");
    }

    tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
    tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
    tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
    WREG32(RADEON_MC_FB_LOCATION, tmp);
    tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
    WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
    (void)RREG32(RADEON_HOST_PATH_CNTL);
    WREG32(RADEON_HOST_PATH_CNTL, tmp);
    (void)RREG32(RADEON_HOST_PATH_CNTL);
    return 0;
}

void rs400_mc_fini(struct radeon_device *rdev)
{
    rs400_gart_disable(rdev);
    radeon_gart_table_ram_free(rdev);
    radeon_gart_fini(rdev);
}


/*
 * Global GPU functions
 */
void rs400_errata(struct radeon_device *rdev)
{
    rdev->pll_errata = 0;
}

void rs400_gpu_init(struct radeon_device *rdev)
{
    /* FIXME: HDP same place on rs400 ? */
    r100_hdp_reset(rdev);
    /* FIXME: is this correct ? */
    r420_pipes_init(rdev);
    if (r300_mc_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait MC idle while "
               "programming pipes. Bad things might happen.\n");
    }
}


/*
 * VRAM info.
 */
void rs400_vram_info(struct radeon_device *rdev)
{
    uint32_t tom;

    rs400_gart_adjust_size(rdev);
    /* DDR for all cards after R300 & IGP */
    rdev->mc.vram_is_ddr = true;
    rdev->mc.vram_width = 128;

    /* read NB_TOM to get the amount of ram stolen for the GPU */
    tom = RREG32(RADEON_NB_TOM);
    rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
    WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);

    /* Could the aperture size report 0 ? */
    rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
    rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}


/*
 * Indirect registers accessor
 */
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
    uint32_t r;

    WREG32(RS480_NB_MC_INDEX, reg & 0xff);
    r = RREG32(RS480_NB_MC_DATA);
    WREG32(RS480_NB_MC_INDEX, 0xff);
    return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
    WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
    WREG32(RS480_NB_MC_DATA, (v));
    WREG32(RS480_NB_MC_INDEX, 0xff);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_device *dev = node->minor->dev;
    struct radeon_device *rdev = dev->dev_private;
    uint32_t tmp;

    tmp = RREG32(RADEON_HOST_PATH_CNTL);
    seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
    tmp = RREG32(RADEON_BUS_CNTL);
    seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
    tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
    seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
    if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
        tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
        seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
        seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
        tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
        seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
        tmp = RREG32_MC(0x100);
        seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
        tmp = RREG32(0x134);
        seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
    } else {
        tmp = RREG32(RADEON_AGP_BASE);
        seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
        tmp = RREG32(RS480_AGP_BASE_2);
        seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
        tmp = RREG32(RADEON_MC_AGP_LOCATION);
        seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
    }
    tmp = RREG32_MC(RS480_GART_BASE);
    seq_printf(m, "GART_BASE 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_GART_FEATURE_ID);
    seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
    seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_MC_MISC_CNTL);
    seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
    tmp = RREG32_MC(0x5F);
    seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
    seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
    tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
    seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
    tmp = RREG32_MC(0x3B);
    seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
    tmp = RREG32_MC(0x3C);
    seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
    tmp = RREG32_MC(0x30);
    seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
    tmp = RREG32_MC(0x31);
    seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
    tmp = RREG32_MC(0x32);
    seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
    tmp = RREG32_MC(0x33);
    seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
    tmp = RREG32_MC(0x34);
    seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
    tmp = RREG32_MC(0x35);
    seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
    tmp = RREG32_MC(0x36);
    seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
    tmp = RREG32_MC(0x37);
    seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
    return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
    {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
    return 0;
#endif
}
@@ -0,0 +1,324 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* rs600 depends on : */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rs600
 *
 * Some of these functions might be used by newer ASICs.
 */
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);


/*
 * GART.
 */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
    uint32_t tmp;

    tmp = RREG32_MC(RS600_MC_PT0_CNTL);
    tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
    WREG32_MC(RS600_MC_PT0_CNTL, tmp);

    tmp = RREG32_MC(RS600_MC_PT0_CNTL);
    tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
    WREG32_MC(RS600_MC_PT0_CNTL, tmp);

    tmp = RREG32_MC(RS600_MC_PT0_CNTL);
    tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
    WREG32_MC(RS600_MC_PT0_CNTL, tmp);
    tmp = RREG32_MC(RS600_MC_PT0_CNTL);
}
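
/*
 * The invalidate bits above are pulsed rather than latched: they are
 * cleared, set to kick the L1 TLB and L2 cache invalidation, then cleared
 * again, and the trailing read ensures the last write has posted before
 * the flush is assumed complete.
 */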

int rs600_gart_enable(struct radeon_device *rdev)
{
    uint32_t tmp;
    int i;
    int r;

    /* Initialize common gart structure */
    r = radeon_gart_init(rdev);
    if (r) {
        return r;
    }
    rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
    r = radeon_gart_table_vram_alloc(rdev);
    if (r) {
        return r;
    }
    /* FIXME: setup default page */
    WREG32_MC(RS600_MC_PT0_CNTL,
              (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
               RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
    for (i = 0; i < 19; i++) {
        WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
                  (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
                   RS600_SYSTEM_ACCESS_MODE_IN_SYS |
                   RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
                   RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
                   RS600_ENABLE_FRAGMENT_PROCESSING |
                   RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
    }

    /* System context map to GART space */
    WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
    tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
    WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);

    /* enable first context */
    WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
    tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
    WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
    WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
              (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
    /* disable all other contexts */
    for (i = 1; i < 8; i++) {
        WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
    }

    /* setup the page table */
    WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
              rdev->gart.table_addr);
    WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

    /* enable page tables */
    tmp = RREG32_MC(RS600_MC_PT0_CNTL);
    WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
    tmp = RREG32_MC(RS600_MC_CNTL1);
    WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
    rs600_gart_tlb_flush(rdev);
    rdev->gart.ready = true;
    return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
    uint32_t tmp;

    /* FIXME: disable out of gart access */
    WREG32_MC(RS600_MC_PT0_CNTL, 0);
    tmp = RREG32_MC(RS600_MC_CNTL1);
    tmp &= ~RS600_ENABLE_PAGE_TABLES;
    WREG32_MC(RS600_MC_CNTL1, tmp);
    radeon_object_kunmap(rdev->gart.table.vram.robj);
    radeon_object_unpin(rdev->gart.table.vram.robj);
}

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
    void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

    if (i < 0 || i > rdev->gart.num_gpu_pages) {
        return -EINVAL;
    }
    addr = addr & 0xFFFFFFFFFFFFF000ULL;
    addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
    addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
    writeq(addr, ((void __iomem *)ptr) + (i * 8));
    return 0;
}
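
/*
 * Unlike the 32-bit rs400 entries, rs600 PTEs are 64 bits (table_size =
 * num_gpu_pages * 8 above): the page-aligned system address ORed with the
 * R600_PTE_* flags. A page at 0x12345000 is therefore written as
 * 0x12345000 | VALID | SYSTEM | SNOOPED | READABLE | WRITEABLE
 * = 0x12345067.
 */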


/*
 * MC.
 */
void rs600_mc_disable_clients(struct radeon_device *rdev)
{
    unsigned tmp;

    if (r100_gui_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait GUI idle while "
               "programming pipes. Bad things might happen.\n");
    }

    tmp = RREG32(AVIVO_D1VGA_CONTROL);
    WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
    tmp = RREG32(AVIVO_D2VGA_CONTROL);
    WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);

    tmp = RREG32(AVIVO_D1CRTC_CONTROL);
    WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
    tmp = RREG32(AVIVO_D2CRTC_CONTROL);
    WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);

    /* make sure all previous writes got through */
    tmp = RREG32(AVIVO_D2CRTC_CONTROL);

    mdelay(1);
}

int rs600_mc_init(struct radeon_device *rdev)
{
    uint32_t tmp;
    int r;

    if (r100_debugfs_rbbm_init(rdev)) {
        DRM_ERROR("Failed to register debugfs file for RBBM !\n");
    }

    rs600_gpu_init(rdev);
    rs600_gart_disable(rdev);

    /* Setup GPU memory space */
    rdev->mc.vram_location = 0xFFFFFFFFUL;
    rdev->mc.gtt_location = 0xFFFFFFFFUL;
    r = radeon_mc_setup(rdev);
    if (r) {
        return r;
    }

    /* Program GPU memory space */
    /* Enable bus master */
    tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
    WREG32(RADEON_BUS_CNTL, tmp);
    /* FIXME: What does AGP mean for such a chipset ? */
    WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
    /* FIXME: are these AGP regs in the indirect MC range ? */
    WREG32_MC(RS600_MC_AGP_BASE, 0);
    WREG32_MC(RS600_MC_AGP_BASE_2, 0);
    rs600_mc_disable_clients(rdev);
    if (rs600_mc_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait MC idle while "
               "programming pipes. Bad things might happen.\n");
    }
    tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
    tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
    tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
    WREG32_MC(RS600_MC_FB_LOCATION, tmp);
    WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
    return 0;
}

void rs600_mc_fini(struct radeon_device *rdev)
{
    rs600_gart_disable(rdev);
    radeon_gart_table_vram_free(rdev);
    radeon_gart_fini(rdev);
}


/*
 * Global GPU functions
 */
void rs600_disable_vga(struct radeon_device *rdev)
{
    unsigned tmp;

    WREG32(0x330, 0);
    WREG32(0x338, 0);
    tmp = RREG32(0x300);
    tmp &= ~(3 << 16);
    WREG32(0x300, tmp);
    WREG32(0x308, (1 << 8));
    WREG32(0x310, rdev->mc.vram_location);
    WREG32(0x594, 0);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
    unsigned i;
    uint32_t tmp;

    for (i = 0; i < rdev->usec_timeout; i++) {
        /* read MC_STATUS */
        tmp = RREG32_MC(RS600_MC_STATUS);
        if (tmp & RS600_MC_STATUS_IDLE) {
            return 0;
        }
        DRM_UDELAY(1);
    }
    return -1;
}

void rs600_errata(struct radeon_device *rdev)
{
    rdev->pll_errata = 0;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
    /* FIXME: HDP same place on rs600 ? */
    r100_hdp_reset(rdev);
    rs600_disable_vga(rdev);
    /* FIXME: is this correct ? */
    r420_pipes_init(rdev);
    if (rs600_mc_wait_for_idle(rdev)) {
        printk(KERN_WARNING "Failed to wait MC idle while "
               "programming pipes. Bad things might happen.\n");
    }
}


/*
 * VRAM info.
 */
void rs600_vram_info(struct radeon_device *rdev)
{
    /* FIXME: to do, or are these values sane ? */
    rdev->mc.vram_is_ddr = true;
    rdev->mc.vram_width = 128;
}


/*
 * Indirect registers accessor
 */
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
    uint32_t r;

    WREG32(RS600_MC_INDEX,
           ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
    r = RREG32(RS600_MC_DATA);
    return r;
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
    WREG32(RS600_MC_INDEX,
           RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
           ((reg) & RS600_MC_ADDR_MASK));
    WREG32(RS600_MC_DATA, v);
}
@@ -0,0 +1,181 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* rs690,rs740 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rs690,rs740
 *
 * Some of these functions might be used by newer ASICs.
 */
void rs690_gpu_init(struct radeon_device *rdev);
int rs690_mc_wait_for_idle(struct radeon_device *rdev);


/*
 * MC functions.
 */
int rs690_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM!\n");
	}

	rs690_gpu_init(rdev);
	rs400_gart_disable(rdev);

	/* Setup GPU memory space */
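	/*
	 * Place the GTT directly after VRAM: bumping by (gtt_size - 1) and
	 * masking with ~(gtt_size - 1) rounds the base up to the next
	 * gtt_size boundary (gtt_size is a power of two). vram_location is
	 * left at 0xFFFFFFFFUL, which appears to be the "not yet placed"
	 * sentinel that radeon_mc_setup() resolves.
	 */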
	rdev->mc.gtt_location = rdev->mc.vram_size;
	rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
	rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	rs600_mc_disable_clients(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
	/* FIXME: Does this reg exist on RS480,RS740? */
	WREG32(0x310, rdev->mc.vram_location);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	return 0;
}

void rs690_mc_fini(struct radeon_device *rdev)
{
	rs400_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Global GPU functions
 */
int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(RS690_MC_STATUS);
		if (tmp & RS690_MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void rs690_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

void rs690_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: is HDP in the same place on rs690? */
	r100_hdp_reset(rdev);
	rs600_disable_vga(rdev);
	/* FIXME: is this correct? */
	r420_pipes_init(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}


/*
 * VRAM info.
 */
void rs690_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;

	rs400_gart_adjust_size(rdev);
	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	/* FIXME: is this correct for RS690/RS740? */
	tmp = RREG32(RADEON_MEM_CNTL);
	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
		rdev->mc.vram_width = 128;
	} else {
		rdev->mc.vram_width = 64;
	}
	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);

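	/*
	 * The CPU-visible aperture is PCI BAR 0; its base and length come
	 * straight from the PCI resource, independent of how much VRAM the
	 * MC itself reports.
	 */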
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}


/*
 * Indirect registers accessor
 */
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
	r = RREG32(RS690_MC_DATA);
	WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
	return r;
}

void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RS690_MC_INDEX,
	       RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
	WREG32(RS690_MC_DATA, v);
	WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
}
@@ -0,0 +1,102 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* rs780 depends on: */
void rs600_mc_disable_clients(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rs780
 *
 * Some of these functions might be used by newer ASICs.
 */
int rs780_mc_wait_for_idle(struct radeon_device *rdev);
void rs780_gpu_init(struct radeon_device *rdev);


/*
 * MC
 */
int rs780_mc_init(struct radeon_device *rdev)
{
	rs780_gpu_init(rdev);
	/* FIXME: implement */

	rs600_mc_disable_clients(rdev);
	if (rs780_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	return 0;
}

void rs780_mc_fini(struct radeon_device *rdev)
{
	/* FIXME: implement */
}


/*
 * Global GPU functions
 */
void rs780_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

int rs780_mc_wait_for_idle(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

void rs780_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: implement */
}


/*
 * VRAM info
 */
void rs780_vram_get_type(struct radeon_device *rdev)
{
	/* FIXME: implement */
}

void rs780_vram_info(struct radeon_device *rdev)
{
	rs780_vram_get_type(rdev);

	/* FIXME: implement */
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
@@ -0,0 +1,504 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* rv515 depends on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int rv370_pcie_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rv515
 *
 * Some of these functions might be used by newer ASICs.
 */
int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);


/*
 * MC
 */
int rv515_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM!\n");
	}
	if (rv515_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes!\n");
	}
	if (rv515_debugfs_ga_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for GA!\n");
	}

	rv515_gpu_init(rdev);
	rv370_pcie_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	rs600_mc_disable_clients(rdev);
	if (rv515_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(0x134, tmp);
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RV515_MC_FB_LOCATION, tmp);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	WREG32(0x310, rdev->mc.vram_location);
	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32_MC(RV515_MC_AGP_LOCATION, tmp);
		WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base);
		WREG32_MC(RV515_MC_AGP_BASE_2, 0);
	} else {
		WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32_MC(RV515_MC_AGP_BASE, 0);
		WREG32_MC(RV515_MC_AGP_BASE_2, 0);
	}
	return 0;
}

void rv515_mc_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Global GPU functions
 */
void rv515_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
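	/*
	 * Translate the probed pipe count into the matching
	 * R300_PIPE_COUNT_* field of GB_TILE_CONFIG; unknown counts fall
	 * back to the single-pipe RV350 setting.
	 */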
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(0x170C, 0));
	radeon_ring_write(rdev, 1 << 31);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(0x42C8, 0));
	radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
	radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}

void rv515_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

int rv515_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(RV515_MC_STATUS);
		if (tmp & RV515_MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void rv515_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;

	r100_hdp_reset(rdev);
	r100_rb2d_reset(rdev);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "resetting GPU. Bad things might happen.\n");
	}

	rs600_disable_vga(rdev);

	r420_pipes_init(rdev);
	gb_pipe_select = RREG32(0x402C);
	tmp = RREG32(0x170C);
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "resetting GPU. Bad things might happen.\n");
	}
	if (rv515_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

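/*
 * Soft-reset the GA/VAP/CP block: quiesce the CP first, pulse
 * RBBM_SOFT_RESET and poll RBBM_STATUS (bits 20 and 26 flag VAP/CP
 * activity) until the engine reports idle or we time out. If the CP
 * ring was live before the reset, it is re-initialized on success.
 */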
int rv515_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	for (i = 0; i < rdev->usec_timeout; i++) {
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		udelay(200);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
			/* GA still busy, soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
			WREG32(0x43E0, 0);
			WREG32(0x43E4, 0);
			WREG32(0x24AC, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
			DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
			DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}

int rv515_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		rv515_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * VRAM info
 */
static void rv515_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32_MC(RV515_MC_CNTL);
	tmp &= RV515_MEM_NUM_CHANNELS_MASK;
	switch (tmp) {
	case 0:
		rdev->mc.vram_width = 64;
		break;
	case 1:
		rdev->mc.vram_width = 128;
		break;
	default:
		rdev->mc.vram_width = 128;
		break;
	}
}

void rv515_vram_info(struct radeon_device *rdev)
{
	rv515_vram_get_type(rdev);
	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}


/*
 * Indirect registers accessor
 */
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
	r = RREG32(R520_MC_IND_DATA);
	WREG32(R520_MC_IND_INDEX, 0);
	return r;
}

void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
	WREG32(R520_MC_IND_DATA, (v));
	WREG32(R520_MC_IND_INDEX, 0);
}

uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
	(void)RREG32(RADEON_PCIE_INDEX);
	r = RREG32(RADEON_PCIE_DATA);
	return r;
}

void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
	(void)RREG32(RADEON_PCIE_INDEX);
	WREG32(RADEON_PCIE_DATA, (v));
	(void)RREG32(RADEON_PCIE_DATA);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(R400_GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(R500_SU_REG_DEST);
	seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
	tmp = RREG32(R300_GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}

static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(0x2140);
	seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
	radeon_gpu_reset(rdev);
	tmp = RREG32(0x425C);
	seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv515_pipes_info_list[] = {
	{"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
};

static struct drm_info_list rv515_ga_info_list[] = {
	{"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
};
#endif

int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
#else
	return 0;
#endif
}

int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
#else
	return 0;
#endif
}
@@ -0,0 +1,124 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"

/* rv770,rv730,rv710 depend on: */
void rs600_mc_disable_clients(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rv770,rv730,rv710
 *
 * Some of these functions might be used by newer ASICs.
 */
int rv770_mc_wait_for_idle(struct radeon_device *rdev);
void rv770_gpu_init(struct radeon_device *rdev);


/*
 * MC
 */
int rv770_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;

	rv770_gpu_init(rdev);

	/* setup the gart before changing location so we can ask to
	 * discard unmapped mc requests
	 */
	/* FIXME: disable out of gart access */
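	/*
	 * The system aperture is programmed in units of 4 KiB logical
	 * pages, hence the division by 4096 before packing the bounds
	 * into the LOW/HIGH address registers.
	 */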
	tmp = rdev->mc.gtt_location / 4096;
	tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
	WREG32(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
	tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
	tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
	WREG32(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);

	rs600_mc_disable_clients(rdev);
	if (rv770_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
	tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
	WREG32(R700_MC_VM_FB_LOCATION, tmp);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	tmp = REG_SET(R700_MC_AGP_TOP, tmp >> 22);
	WREG32(R700_MC_VM_AGP_TOP, tmp);
	tmp = REG_SET(R700_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
	WREG32(R700_MC_VM_AGP_BOT, tmp);
	return 0;
}

void rv770_mc_fini(struct radeon_device *rdev)
{
	/* FIXME: implement */
}


/*
 * Global GPU functions
 */
void rv770_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

int rv770_mc_wait_for_idle(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

void rv770_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: implement */
}


/*
 * VRAM info
 */
void rv770_vram_get_type(struct radeon_device *rdev)
{
	/* FIXME: implement */
}

void rv770_vram_info(struct radeon_device *rdev)
{
	rv770_vram_get_type(rdev);

	/* FIXME: implement */
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
@@ -0,0 +1,8 @@
#
# Makefile for the drm device driver. This driver provides support for the

ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o

obj-$(CONFIG_DRM_TTM) += ttm.o
@@ -0,0 +1,150 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *          Keith Packard.
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#ifdef TTM_HAS_AGP
#include "ttm/ttm_placement.h"
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/agp.h>

struct ttm_agp_backend {
	struct ttm_backend backend;
	struct agp_memory *mem;
	struct agp_bridge_data *bridge;
};

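/*
 * Fill the AGP memory structure with the backing pages. Slots that have
 * no page yet are pointed at the shared dummy read page, so the whole
 * range stays mappable before it is actually populated.
 */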
static int ttm_agp_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page)
{
	struct ttm_agp_backend *agp_be =
	    container_of(backend, struct ttm_agp_backend, backend);
	struct page **cur_page, **last_page = pages + num_pages;
	struct agp_memory *mem;

	mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
	if (unlikely(mem == NULL))
		return -ENOMEM;

	mem->page_count = 0;
	for (cur_page = pages; cur_page < last_page; ++cur_page) {
		struct page *page = *cur_page;
		if (!page)
			page = dummy_read_page;

		mem->memory[mem->page_count++] =
		    phys_to_gart(page_to_phys(page));
	}
	agp_be->mem = mem;
	return 0;
}

static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
	struct ttm_agp_backend *agp_be =
	    container_of(backend, struct ttm_agp_backend, backend);
	struct agp_memory *mem = agp_be->mem;
	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
	int ret;

	mem->is_flushed = 1;
	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;

	ret = agp_bind_memory(mem, bo_mem->mm_node->start);
	if (ret)
		printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");

	return ret;
}

static int ttm_agp_unbind(struct ttm_backend *backend)
{
	struct ttm_agp_backend *agp_be =
	    container_of(backend, struct ttm_agp_backend, backend);

	if (agp_be->mem->is_bound)
		return agp_unbind_memory(agp_be->mem);
	else
		return 0;
}

static void ttm_agp_clear(struct ttm_backend *backend)
{
	struct ttm_agp_backend *agp_be =
	    container_of(backend, struct ttm_agp_backend, backend);
	struct agp_memory *mem = agp_be->mem;

	if (mem) {
		ttm_agp_unbind(backend);
		agp_free_memory(mem);
	}
	agp_be->mem = NULL;
}

static void ttm_agp_destroy(struct ttm_backend *backend)
{
	struct ttm_agp_backend *agp_be =
	    container_of(backend, struct ttm_agp_backend, backend);

	if (agp_be->mem)
		ttm_agp_clear(backend);
	kfree(agp_be);
}

static struct ttm_backend_func ttm_agp_func = {
	.populate = ttm_agp_populate,
	.clear = ttm_agp_clear,
	.bind = ttm_agp_bind,
	.unbind = ttm_agp_unbind,
	.destroy = ttm_agp_destroy,
};

struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
					 struct agp_bridge_data *bridge)
{
	struct ttm_agp_backend *agp_be;

	agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
	if (!agp_be)
		return NULL;

	agp_be->mem = NULL;
	agp_be->bridge = bridge;
	agp_be->backend.func = &ttm_agp_func;
	agp_be->backend.bdev = bdev;
	return &agp_be->backend;
}
EXPORT_SYMBOL(ttm_agp_backend_init);

#endif
File diff suppressed because it is too large
@@ -0,0 +1,561 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->bdev->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->bdev->lru_lock);
	}
	old_mem->mm_node = NULL;
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	uint32_t save_flags = old_mem->placement;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
		save_flags = old_mem->placement;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap(d);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);
	kunmap(d);
	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap(s);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);
	kunmap(s);
	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint32_t save_flags = old_mem->placement;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

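	/*
	 * If source and destination ranges can overlap within the same
	 * memory type, copy the pages back to front so later reads are
	 * not clobbered by earlier writes.
	 */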
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL)
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
		else if (new_iomap == NULL)
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
		else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

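/*
 * Map part of a buffer object into kernel address space. A bus_size of
 * zero from ttm_bo_pci_offset() means the object is backed by system
 * pages and goes through kmap/vmap; otherwise the relevant slice of the
 * PCI aperture is ioremapped.
 */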
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;

	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else
		if (!bo->ttm)
			return -EINVAL;
		else
			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
							   dst_offset >>
							   PAGE_SHIFT));
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

	return 0;
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	uint32_t save_flags = old_mem->placement;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		driver->sync_obj_unref(&bo->sync_obj);

		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
@@ -0,0 +1,454 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/version.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16

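/*
 * Walk the device's address-space red-black tree for the buffer object
 * whose VM node covers [page_start, page_start + num_pages); returns
 * NULL when no node fully contains the requested range.
 */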
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
|
||||
unsigned long page_start,
|
||||
unsigned long num_pages)
|
||||
{
|
||||
struct rb_node *cur = bdev->addr_space_rb.rb_node;
|
||||
unsigned long cur_offset;
|
||||
struct ttm_buffer_object *bo;
|
||||
struct ttm_buffer_object *best_bo = NULL;
|
||||
|
||||
while (likely(cur != NULL)) {
|
||||
bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
|
||||
cur_offset = bo->vm_node->start;
|
||||
if (page_start >= cur_offset) {
|
||||
cur = cur->rb_right;
|
||||
best_bo = bo;
|
||||
if (page_start == cur_offset)
|
||||
break;
|
||||
} else
|
||||
cur = cur->rb_left;
|
||||
}
|
||||
|
||||
if (unlikely(best_bo == NULL))
|
||||
return NULL;
|
||||
|
||||
if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
|
||||
(page_start + num_pages)))
|
||||
return NULL;
|
||||
|
||||
return best_bo;
|
||||
}
|
||||
|
||||
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	bool is_iomem;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bo->lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bo->lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTART) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bo->lock);

	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	is_iomem = (bus_size != 0);

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	if (is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {

		if (is_iomem)
			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
			    page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}

out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
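Editor's note: the workaround described in the comment at the top of the fault handler is a classic lock-order inversion fix. The fault path already holds mmap_sem, while other paths take bo_reserve before mmap_sem, so the fault handler only ever trylocks the reservation and asks the VM to re-raise the fault on contention. A runnable pthread analogue of that back-off (lock names hypothetical):

```c
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Hypothetical locks standing in for mmap_sem and bo::reserved. */
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take map_lock, then only *try* obj_lock; on contention, drop
 * everything and let the caller retry instead of blocking in the
 * wrong lock order. */
static int fault_like_path(void)
{
	pthread_mutex_lock(&map_lock);
	if (pthread_mutex_trylock(&obj_lock) != 0) {
		pthread_mutex_unlock(&map_lock);
		sched_yield();		/* analogous to set_need_resched() */
		return -1;		/* "VM_FAULT_NOPAGE": retry later */
	}
	/* ... fault work under both locks ... */
	pthread_mutex_unlock(&obj_lock);
	pthread_mutex_unlock(&map_lock);
	return 0;
}

int main(void)
{
	while (fault_like_path() != 0)
		;			/* the hardware would re-raise the fault */
	puts("fault handled");
	return 0;
}
```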
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
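Editor's note: a hedged sketch of how a driver might hook this entry point up. The "mydrv" names are hypothetical; the only thing taken from this patch is the ttm_bo_mmap(filp, vma, bdev) signature above.

```c
/*
 * Sketch only: a driver's file_operations.mmap forwarding to
 * ttm_bo_mmap(). The device lookup via private_data is an
 * assumption, not part of this patch.
 */
static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mydrv_device *dev = filp->private_data;	/* hypothetical */

	return ttm_bo_mmap(filp, vma, &dev->bdev);
}
```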
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -ERESTART:
		ret = -EINTR;
		goto out_unref;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
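Editor's note: the arithmetic above converts a byte file position into a (page index, intra-page offset) pair and clamps the transfer to the pages that can be mapped. A standalone, runnable check of the same math, assuming 4 KiB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long f_pos = 3 * PAGE_SIZE + 100;	/* sample file position */
	unsigned long num_pages = 8;			/* sample bo size */
	unsigned long count = 5000;			/* requested bytes */
	unsigned long kmap_offset, page_offset, io_size, kmap_end, kmap_num;

	kmap_offset = f_pos >> PAGE_SHIFT;		/* page 3 */
	page_offset = f_pos & ~PAGE_MASK;		/* byte 100 in that page */
	io_size = ((num_pages - kmap_offset) << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;			/* clamp to the request */

	kmap_end = (f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;		/* pages to map */

	/* prints: page 3 + 100 bytes, io_size 5000, map 2 pages */
	printf("page %lu + %lu bytes, io_size %lu, map %lu pages\n",
	       kmap_offset, page_offset, io_size, kmap_num);
	return 0;
}
```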
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -ERESTART:
		return -EINTR;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
@@ -0,0 +1,114 @@
/**************************************************************************
 *
 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/module.h>

struct ttm_global_item {
	struct mutex mutex;
	void *object;
	int refcount;
};

static struct ttm_global_item glob[TTM_GLOBAL_NUM];

void ttm_global_init(void)
{
	int i;

	for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
		struct ttm_global_item *item = &glob[i];
		mutex_init(&item->mutex);
		item->object = NULL;
		item->refcount = 0;
	}
}

void ttm_global_release(void)
{
	int i;
	for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
		struct ttm_global_item *item = &glob[i];
		BUG_ON(item->object != NULL);
		BUG_ON(item->refcount != 0);
	}
}

int ttm_global_item_ref(struct ttm_global_reference *ref)
{
	int ret;
	struct ttm_global_item *item = &glob[ref->global_type];

	mutex_lock(&item->mutex);
	if (item->refcount == 0) {
		item->object = kmalloc(ref->size, GFP_KERNEL);
		if (unlikely(item->object == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		ref->object = item->object;
		ret = ref->init(ref);
		if (unlikely(ret != 0))
			goto out_err;
	}
	++item->refcount;
	ref->object = item->object;
	mutex_unlock(&item->mutex);
	return 0;
out_err:
	kfree(item->object);
	item->object = NULL;
	mutex_unlock(&item->mutex);
	return ret;
}
EXPORT_SYMBOL(ttm_global_item_ref);

void ttm_global_item_unref(struct ttm_global_reference *ref)
{
	struct ttm_global_item *item = &glob[ref->global_type];

	mutex_lock(&item->mutex);
	BUG_ON(item->refcount == 0);
	BUG_ON(ref->object != item->object);
	if (--item->refcount == 0) {
		ref->release(ref);
		kfree(item->object);
		item->object = NULL;
	}
	mutex_unlock(&item->mutex);
}
EXPORT_SYMBOL(ttm_global_item_unref);
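Editor's note: in ttm_global_item_ref() above, the refcount increment has been moved out of the first-init branch so that every successful reference counts one (the posted version only incremented on first use, which would let the item be torn down while later users still held it); the NULL reset on the error path now also happens under the lock. For intuition, the same pattern in runnable userspace form: a mutex-guarded singleton created by the first reference and destroyed by the last (pthreads stand in for the kernel mutex; the init/release hooks are omitted):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace analogue of the ttm_global_item pattern. */
struct item {
	pthread_mutex_t mutex;
	void *object;
	int refcount;
};

static struct item item = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

static void *item_ref(size_t size)
{
	void *obj;

	pthread_mutex_lock(&item.mutex);
	if (item.refcount == 0)
		item.object = calloc(1, size);	/* first user creates it */
	if (item.object)
		item.refcount++;		/* every user takes a ref */
	obj = item.object;
	pthread_mutex_unlock(&item.mutex);
	return obj;
}

static void item_unref(void)
{
	pthread_mutex_lock(&item.mutex);
	if (--item.refcount == 0) {		/* last user destroys it */
		free(item.object);
		item.object = NULL;
	}
	pthread_mutex_unlock(&item.mutex);
}

int main(void)
{
	void *a = item_ref(64), *b = item_ref(64);

	printf("same object: %d\n", a == b);	/* 1: shared singleton */
	item_unref();
	item_unref();
	return 0;
}
```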
@@ -0,0 +1,234 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "ttm/ttm_memory.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>

#define TTM_PFX "[TTM] "
#define TTM_MEMORY_ALLOC_RETRIES 4

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;
	uint64_t target;
	uint64_t total_target;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	if (from_workqueue) {
		target = glob->swap_limit;
		total_target = glob->total_memory_swap_limit;
	} else if (capable(CAP_SYS_ADMIN)) {
		total_target = glob->emer_total_memory;
		target = glob->emer_memory;
	} else {
		total_target = glob->max_total_memory;
		target = glob->max_memory;
	}

	total_target = (extra >= total_target) ? 0 : total_target - extra;
	target = (extra >= target) ? 0 : target - extra;

	while (glob->used_memory > target ||
	       glob->used_total_memory > total_target) {
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL);
}

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	uint64_t mem;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	init_waitqueue_head(&glob->queue);

	si_meminfo(&si);

	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	glob->max_memory = mem >> 1;
	glob->emer_memory = (mem >> 1) + (mem >> 2);
	glob->swap_limit = glob->max_memory - (mem >> 3);
	glob->used_memory = 0;
	glob->used_total_memory = 0;
	glob->shrink = NULL;

	mem = si.totalram;
	mem *= si.mem_unit;

	glob->max_total_memory = mem >> 1;
	glob->emer_total_memory = (mem >> 1) + (mem >> 2);

	glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);

	printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
	       glob->max_total_memory >> 20);
	printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
	       glob->max_memory >> 20);

	return 0;
}
EXPORT_SYMBOL(ttm_mem_global_init);
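Editor's note: the limits above are fixed fractions of RAM — half as the ordinary ceiling, three quarters as the CAP_SYS_ADMIN emergency ceiling, and a swap threshold one eighth of RAM below the ordinary ceiling (the lowmem variants use totalram minus totalhigh). A tiny standalone program reproducing the arithmetic for a sample machine:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mem = 8ULL << 30;		/* sample: 8 GiB of RAM */

	uint64_t max  = mem >> 1;		/* ordinary ceiling: 1/2 */
	uint64_t emer = (mem >> 1) + (mem >> 2);/* emergency ceiling: 3/4 */
	uint64_t swap = max - (mem >> 3);	/* start swapping at 3/8 */

	/* prints: max 4096 MiB, emer 6144 MiB, swap at 3072 MiB */
	printf("max %llu MiB, emer %llu MiB, swap at %llu MiB\n",
	       (unsigned long long)(max >> 20),
	       (unsigned long long)(emer >> 20),
	       (unsigned long long)(swap >> 20));
	return 0;
}
```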
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
	       (unsigned long long)glob->used_total_memory);
	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
}
EXPORT_SYMBOL(ttm_mem_global_release);

static inline void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping;

	spin_lock(&glob->lock);
	needs_swapping = (glob->used_memory > glob->swap_limit ||
			  glob->used_total_memory >
			  glob->total_memory_swap_limit);
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount, bool himem)
{
	spin_lock(&glob->lock);
	glob->used_total_memory -= amount;
	if (!himem)
		glob->used_memory -= amount;
	wake_up_all(&glob->queue);
	spin_unlock(&glob->lock);
}

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  uint64_t amount, bool himem, bool reserve)
{
	uint64_t limit;
	uint64_t lomem_limit;
	int ret = -ENOMEM;

	spin_lock(&glob->lock);

	if (capable(CAP_SYS_ADMIN)) {
		limit = glob->emer_total_memory;
		lomem_limit = glob->emer_memory;
	} else {
		limit = glob->max_total_memory;
		lomem_limit = glob->max_memory;
	}

	if (unlikely(glob->used_total_memory + amount > limit))
		goto out_unlock;
	if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
		goto out_unlock;

	if (reserve) {
		glob->used_total_memory += amount;
		if (!himem)
			glob->used_memory += amount;
	}
	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible, bool himem)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
			!= 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}
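Editor's note: a hedged usage sketch for the accounting interface above. Every successful ttm_mem_global_alloc() should eventually be paired with a ttm_mem_global_free() of the same amount and himem flag, as the per-page paths in ttm_tt.c do. "my_glob" and the wrapper are hypothetical; only the signatures come from this file.

```c
/* Sketch only: reserve accounting around some driver-side allocation. */
static int my_account_object(struct ttm_mem_global *my_glob, uint64_t size)
{
	int ret;

	/* no_wait = false, interruptible = true, himem = false */
	ret = ttm_mem_global_alloc(my_glob, size, false, true, false);
	if (unlikely(ret != 0))
		return ret;	/* still over the limit after shrinking */

	/* ... allocate and use the object ... */

	ttm_mem_global_free(my_glob, size, false);	/* same size, same flag */
	return 0;
}
```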
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
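Editor's note: ttm_round_pot() rounds small sizes up to the next power of two (minimum 4) and anything beyond a page to whole pages; an exact power of two passes through unchanged. A runnable demonstration of the same rounding, with 4 KiB pages assumed:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long round_pot(unsigned long size)
{
	unsigned long tmp = 4;

	if ((size & (size - 1)) == 0)
		return size;			/* already a power of two */
	if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);	/* large: whole pages */
	while (tmp < size)
		tmp <<= 1;			/* small: next power of two */
	return tmp;
}

int main(void)
{
	/* prints: 32 4096 8192 */
	printf("%lu %lu %lu\n", round_pot(24), round_pot(4096),
	       round_pot(5000));
	return 0;
}
```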
@@ -0,0 +1,50 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *          Jerome Glisse
 */
#include <linux/module.h>
#include <ttm/ttm_module.h>

static int __init ttm_init(void)
{
	ttm_global_init();
	return 0;
}

static void __exit ttm_exit(void)
{
	ttm_global_release();
}

module_init(ttm_init);
module_exit(ttm_exit);

MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
@@ -0,0 +1,635 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#else
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif

void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}
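Editor's note: a userspace analogue of the flush loop above, x86-only and assuming 64-byte cache lines (the kernel reads the actual stride from boot_cpu_data instead):

```c
#include <emmintrin.h>		/* _mm_clflush, _mm_mfence (x86 only) */
#include <stdlib.h>

/* Walk a buffer in cache-line steps, fenced on both sides, mirroring
 * ttm_tt_cache_flush_clflush(). 64-byte lines are an assumption here. */
static void flush_buffer(void *buf, size_t len)
{
	char *p = buf;
	size_t i;

	_mm_mfence();
	for (i = 0; i < len; i += 64)
		_mm_clflush(p + i);
	_mm_mfence();
}

int main(void)
{
	char *buf = malloc(4096);

	if (buf)
		flush_buffer(buf, 4096);
	free(buf);
	return 0;
}
```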
/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}

static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		return alloc_page(GFP_HIGHUSER | __GFP_ZERO);

	return alloc_page(GFP_HIGHUSER);
}

static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_bo_device *bdev = ttm->bdev;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, true);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[++ttm->last_lomem_page] = p;
		}
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}

static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
					    PageHighMem(cur_page));
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->bdev = bdev;

	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}

int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return -ENOMEM;
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page)))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}
@@ -115,5 +115,7 @@ source "drivers/staging/line6/Kconfig"

source "drivers/staging/serqt_usb/Kconfig"

source "drivers/gpu/drm/radeon/Kconfig"

endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
@@ -254,8 +254,8 @@
 {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \

@@ -273,8 +273,8 @@
 {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -496,6 +496,16 @@ typedef struct {
#define DRM_RADEON_SETPARAM   0x19
#define DRM_RADEON_SURF_ALLOC 0x1a
#define DRM_RADEON_SURF_FREE  0x1b
/* KMS ioctl */
#define DRM_RADEON_GEM_INFO		0x1c
#define DRM_RADEON_GEM_CREATE		0x1d
#define DRM_RADEON_GEM_MMAP		0x1e
#define DRM_RADEON_GEM_PREAD		0x21
#define DRM_RADEON_GEM_PWRITE		0x22
#define DRM_RADEON_GEM_SET_DOMAIN	0x23
#define DRM_RADEON_GEM_WAIT_IDLE	0x24
#define DRM_RADEON_CS			0x26
#define DRM_RADEON_INFO			0x27

#define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)

@@ -524,6 +534,17 @@ typedef struct {
#define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
#define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
/* KMS */
#define DRM_IOCTL_RADEON_GEM_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
#define DRM_IOCTL_RADEON_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
#define DRM_IOCTL_RADEON_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
#define DRM_IOCTL_RADEON_GEM_PREAD	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
#define DRM_IOCTL_RADEON_GEM_PWRITE	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE	DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)

typedef struct drm_radeon_init {
	enum {

@@ -682,6 +703,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION           14   /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID             16

typedef struct drm_radeon_getparam {
	int param;

@@ -751,4 +773,112 @@ typedef struct drm_radeon_surface_free {
#define DRM_RADEON_VBLANK_CRTC1		1
#define DRM_RADEON_VBLANK_CRTC2		2

/*
 * Kernel modesetting world below.
 */
#define RADEON_GEM_DOMAIN_CPU		0x1
#define RADEON_GEM_DOMAIN_GTT		0x2
#define RADEON_GEM_DOMAIN_VRAM		0x4

struct drm_radeon_gem_info {
	uint64_t	gart_size;
	uint64_t	vram_size;
	uint64_t	vram_visible;
};

#define RADEON_GEM_NO_BACKING_STORE 1

struct drm_radeon_gem_create {
	uint64_t	size;
	uint64_t	alignment;
	uint32_t	handle;
	uint32_t	initial_domain;
	uint32_t	flags;
};
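Editor's note: a hedged userspace sketch of allocating a buffer through the ioctl above. It assumes this header is installed as <drm/radeon_drm.h> and that a device node exists at /dev/dri/card0; error handling is trimmed.

```c
#include <drm/radeon_drm.h>	/* assumed install path for this header */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

int main(void)
{
	struct drm_radeon_gem_create req;
	int fd = open("/dev/dri/card0", O_RDWR);	/* device node assumed */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.size = 1024 * 1024;			/* 1 MiB object */
	req.alignment = 4096;
	req.initial_domain = RADEON_GEM_DOMAIN_GTT;

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req) == 0)
		printf("new handle: %u\n", req.handle);	/* filled in by the kernel */
	return 0;
}
```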
struct drm_radeon_gem_mmap {
	uint64_t	offset;
	uint64_t	size;
	uint64_t	addr_ptr;
	uint32_t	handle;
	uint32_t	pad;
};

struct drm_radeon_gem_set_domain {
	uint32_t	handle;
	uint32_t	read_domains;
	uint32_t	write_domain;
};

struct drm_radeon_gem_wait_idle {
	uint32_t	handle;
	uint32_t	pad;
};

struct drm_radeon_gem_busy {
	uint32_t	handle;
	uint32_t	busy;
};

struct drm_radeon_gem_pread {
	/** Handle for the object being read. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to read from */
	uint64_t offset;
	/** Length of data to read */
	uint64_t size;
	/** Pointer to write the data into. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};

struct drm_radeon_gem_pwrite {
	/** Handle for the object being written to. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to write to */
	uint64_t offset;
	/** Length of data to write */
	uint64_t size;
	/** Pointer to read the data from. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};

#define RADEON_CHUNK_ID_RELOCS	0x01
#define RADEON_CHUNK_ID_IB	0x02

struct drm_radeon_cs_chunk {
	uint32_t	chunk_id;
	uint32_t	length_dw;
	uint64_t	chunk_data;
};

struct drm_radeon_cs_reloc {
	uint32_t	handle;
	uint32_t	read_domains;
	uint32_t	write_domain;
	uint32_t	flags;
};

struct drm_radeon_cs {
	uint32_t	num_chunks;
	uint32_t	cs_id;
	/* this points to uint64_t * which point to cs chunks */
	uint64_t	chunks;
	/* updates to the limits after this CS ioctl */
	uint64_t	gart_limit;
	uint64_t	vram_limit;
};
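Editor's note: the comment in drm_radeon_cs above points out a double indirection — "chunks" holds a user pointer to an array of uint64_t, each of which holds a user pointer to one drm_radeon_cs_chunk. A sketch of assembling a single-IB submission; the packet words are placeholders, and it assumes <stdint.h> plus this header.

```c
#include <stdint.h>
/* plus this header for the struct and RADEON_CHUNK_ID_IB definitions */

static void build_cs(struct drm_radeon_cs *cs,
		     struct drm_radeon_cs_chunk *chunk,
		     uint64_t *chunk_ptrs, uint32_t *ib, uint32_t ib_dw)
{
	chunk->chunk_id = RADEON_CHUNK_ID_IB;
	chunk->length_dw = ib_dw;			/* payload in dwords */
	chunk->chunk_data = (uint64_t)(uintptr_t)ib;	/* -> command words */

	chunk_ptrs[0] = (uint64_t)(uintptr_t)chunk;	/* -> chunk struct */

	cs->num_chunks = 1;
	cs->chunks = (uint64_t)(uintptr_t)chunk_ptrs;	/* -> pointer array */
	/* then submit with: ioctl(fd, DRM_IOCTL_RADEON_CS, cs); */
}
```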
#define RADEON_INFO_DEVICE_ID		0x00
#define RADEON_INFO_NUM_GB_PIPES	0x01

struct drm_radeon_info {
	uint32_t	request;
	uint32_t	pad;
	uint64_t	value;
};

#endif
@ -0,0 +1,618 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#ifndef _TTM_BO_API_H_
|
||||
#define _TTM_BO_API_H_
|
||||
|
||||
#include "drm_hashtab.h"
|
||||
#include <linux/kref.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/bitmap.h>
|
||||
|
||||
struct ttm_bo_device;
|
||||
|
||||
struct drm_mm_node;
|
||||
|
||||
/**
|
||||
* struct ttm_mem_reg
|
||||
*
|
||||
* @mm_node: Memory manager node.
|
||||
* @size: Requested size of memory region.
|
||||
* @num_pages: Actual size of memory region in pages.
|
||||
* @page_alignment: Page alignment.
|
||||
* @placement: Placement flags.
|
||||
*
|
||||
* Structure indicating the placement and space resources used by a
|
||||
* buffer object.
|
||||
*/
|
||||
|
||||
struct ttm_mem_reg {
|
||||
struct drm_mm_node *mm_node;
|
||||
unsigned long size;
|
||||
unsigned long num_pages;
|
||||
uint32_t page_alignment;
|
||||
uint32_t mem_type;
|
||||
uint32_t placement;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum ttm_bo_type
|
||||
*
|
||||
* @ttm_bo_type_device: These are 'normal' buffers that can
|
||||
* be mmapped by user space. Each of these bos occupy a slot in the
|
||||
* device address space, that can be used for normal vm operations.
|
||||
*
|
||||
* @ttm_bo_type_user: These are user-space memory areas that are made
|
||||
* available to the GPU by mapping the buffer pages into the GPU aperture
|
||||
* space. These buffers cannot be mmaped from the device address space.
|
||||
*
|
||||
* @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
|
||||
* but they cannot be accessed from user-space. For kernel-only use.
|
||||
*/
|
||||
|
||||
enum ttm_bo_type {
|
||||
ttm_bo_type_device,
|
||||
ttm_bo_type_user,
|
||||
ttm_bo_type_kernel
|
||||
};
|
||||
|
||||
struct ttm_tt;
|
||||
|
||||

/**
 * struct ttm_buffer_object
 *
 * @bdev: Pointer to the buffer object device structure.
 * @buffer_start: The virtual user-space start address of ttm_bo_type_user
 * buffers.
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
 * @addr_space_offset: Address space offset.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
 * @list_kref: List reference count of this buffer object. This member is
 * used to avoid destruction while the buffer object is still on a list.
 * LRU lists may keep one refcount, the delayed delete list keeps one, and
 * kref != 0 keeps one. When this refcount reaches zero,
 * the object is destroyed.
 * @event_queue: Queue for processes waiting on buffer object status change.
 * @lock: spinlock protecting mostly synchronization members.
 * @proposed_placement: Proposed placement for the buffer. Changed only by the
 * creator prior to validation as opposed to bo->mem.proposed_flags which is
 * changed by the implementation prior to a buffer move if it wants to outsmart
 * the buffer creator / user. The latter happens, for example, at eviction.
 * @mem: structure describing current placement.
 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object.
 * @ttm: TTM structure holding system pages.
 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writers: For synchronization. Number of cpu writers.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @swap: List head for the swap LRU list.
 * @val_seq: Sequence of the validation holding the @reserved lock.
 * Used to avoid starvation when many processes compete to validate the
 * buffer. This member is protected by the bo_device::lru_lock.
 * @seq_valid: The value of @val_seq is valid. This value is protected by
 * the bo_device::lru_lock.
 * @reserved: Deadlock-free lock used for synchronization state transitions.
 * @sync_obj_arg: Opaque argument to synchronization object function.
 * @sync_obj: Pointer to a synchronization object.
 * @priv_flags: Flags describing buffer object internal state.
 * @vm_rb: Rb node for the vm rb tree.
 * @vm_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 *
 * Base class for TTM buffer objects that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset @offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The @destroy member, together with the API visibility of this object,
 * makes it possible to derive driver specific types.
 */

struct ttm_buffer_object {
        /**
         * Members constant at init.
         */

        struct ttm_bo_device *bdev;
        unsigned long buffer_start;
        enum ttm_bo_type type;
        void (*destroy) (struct ttm_buffer_object *);
        unsigned long num_pages;
        uint64_t addr_space_offset;
        size_t acc_size;

        /**
         * Members not needing protection.
         */

        struct kref kref;
        struct kref list_kref;
        wait_queue_head_t event_queue;
        spinlock_t lock;

        /**
         * Members protected by the bo::reserved lock.
         */

        uint32_t proposed_placement;
        struct ttm_mem_reg mem;
        struct file *persistant_swap_storage;
        struct ttm_tt *ttm;
        bool evicted;

        /**
         * Members protected by the bo::reserved lock only when written to.
         */

        atomic_t cpu_writers;

        /**
         * Members protected by the bdev::lru_lock.
         */

        struct list_head lru;
        struct list_head ddestroy;
        struct list_head swap;
        uint32_t val_seq;
        bool seq_valid;

        /**
         * Members protected by the bdev::lru_lock
         * only when written to.
         */

        atomic_t reserved;

        /**
         * Members protected by the bo::lock
         */

        void *sync_obj_arg;
        void *sync_obj;
        unsigned long priv_flags;

        /**
         * Members protected by the bdev::vm_lock
         */

        struct rb_node vm_rb;
        struct drm_mm_node *vm_node;

        /**
         * Special members that are protected by the reserve lock
         * and the bo::lock when written to. Can be read with
         * either of these locks held.
         */

        unsigned long offset;
        uint32_t cur_placement;
};

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

struct ttm_bo_kmap_obj {
        void *virtual;
        struct page *page;
        enum {
                ttm_bo_map_iomap,
                ttm_bo_map_vmap,
                ttm_bo_map_kmap,
                ttm_bo_map_premapped,
        } bo_kmap_type;
};

/**
 * ttm_bo_reference - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 *
 * Returns a refcounted pointer to a buffer object.
 */

static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
        kref_get(&bo->kref);
        return bo;
}

/**
 * ttm_bo_wait - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @interruptible: Use interruptible wait.
 * @no_wait: Return immediately if buffer is busy.
 *
 * This function must be called with the bo::mutex held, and makes
 * sure any previous rendering to the buffer is completed.
 * Note: It might be necessary to block validations before the
 * wait by reserving the buffer.
 * Returns -EBUSY if no_wait is true and the buffer is busy.
 * Returns -ERESTART if interrupted by a signal.
 */
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
                       bool interruptible, bool no_wait);

/**
 * ttm_buffer_object_validate
 *
 * @bo: The buffer object.
 * @proposed_placement: Proposed placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait: Return immediately if the buffer is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to bo::proposed_flags.
 * Returns
 * -EINVAL on invalid proposed_flags.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTART if interrupted by a signal.
 */
extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
                                      uint32_t proposed_placement,
                                      bool interruptible, bool no_wait);

/**
 * ttm_bo_unref
 *
 * @bo: The buffer object.
 *
 * Unreference and clear a pointer to a buffer object.
 */
extern void ttm_bo_unref(struct ttm_buffer_object **bo);

/**
 * ttm_bo_synccpu_write_grab
 *
 * @bo: The buffer object.
 * @no_wait: Return immediately if buffer is busy.
 *
 * Synchronizes a buffer object for CPU RW access. This means
 * blocking command submission that affects the buffer and
 * waiting for buffer idle. This lock is recursive.
 * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTART if interrupted by a signal.
 */

extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_synccpu_write_release:
 *
 * @bo: The buffer object.
 *
 * Releases a synccpu lock.
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);

/**
 * ttm_buffer_object_init
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @flags: Initial placement flags.
 * @page_alignment: Data alignment in pages.
 * @buffer_start: Virtual address of user space data backing a
 * user buffer object.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 * On successful return, the object kref and list_kref are set to 1.
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTART: Interrupted by signal while sleeping waiting for resources.
 */

extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                                  struct ttm_buffer_object *bo,
                                  unsigned long size,
                                  enum ttm_bo_type type,
                                  uint32_t flags,
                                  uint32_t page_alignment,
                                  unsigned long buffer_start,
                                  bool interruptible,
                                  struct file *persistant_swap_storage,
                                  size_t acc_size,
                                  void (*destroy) (struct ttm_buffer_object *));
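
/*
 * Illustrative sketch, not part of this patch: deriving a driver-private
 * buffer object from struct ttm_buffer_object with ttm_buffer_object_init()
 * and a custom destroy callback. "foo_bo", foo_bo_destroy() and the
 * placement flags are hypothetical; acc_size is taken from bdev->ttm_bo_size
 * and error unwinding is elided for brevity.
 */
struct foo_bo {
        struct ttm_buffer_object base;  /* recovered via container_of() */
        uint32_t domain;                /* driver-private placement hint */
};

static void foo_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct foo_bo *fbo = container_of(tbo, struct foo_bo, base);

        kfree(fbo);             /* free the derived object, not just the base */
}

static int foo_bo_new(struct ttm_bo_device *bdev, unsigned long size,
                      uint32_t flags, struct foo_bo **p_fbo)
{
        struct foo_bo *fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
        int ret;

        if (unlikely(fbo == NULL))
                return -ENOMEM;
        ret = ttm_buffer_object_init(bdev, &fbo->base, size,
                                     ttm_bo_type_kernel, flags,
                                     1, 0, true, NULL,
                                     bdev->ttm_bo_size, foo_bo_destroy);
        if (likely(ret == 0))
                *p_fbo = fbo;
        return ret;
}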

/**
 * ttm_buffer_object_create
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @flags: Initial placement flags.
 * @page_alignment: Data alignment in pages.
 * @buffer_start: Virtual address of user space data backing a
 * user buffer object.
 * @interruptible: If needing to sleep while waiting for GPU resources,
 * sleep interruptible.
 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @p_bo: On successful completion *p_bo points to the created object.
 *
 * This function allocates a ttm_buffer_object, and then calls
 * ttm_buffer_object_init on that object.
 * The destroy function is set to kfree().
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTART: Interrupted by signal while waiting for resources.
 */

extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                                    unsigned long size,
                                    enum ttm_bo_type type,
                                    uint32_t flags,
                                    uint32_t page_alignment,
                                    unsigned long buffer_start,
                                    bool interruptible,
                                    struct file *persistant_swap_storage,
                                    struct ttm_buffer_object **p_bo);

/**
 * ttm_bo_check_placement
 *
 * @bo: the buffer object.
 * @set_flags: placement flags to set.
 * @clr_flags: placement flags to clear.
 *
 * Performs minimal validity checking on an intended change of
 * placement flags.
 * Returns
 * -EINVAL: Intended change is invalid or not allowed.
 */

extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                                  uint32_t set_flags, uint32_t clr_flags);

/**
 * ttm_bo_init_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 * @p_offset: Offset of the managed area in pages.
 * @p_size: Size of the managed area in pages.
 *
 * Initialize a manager for a given memory type.
 * Note: if part of driver firstopen, it must be protected from a
 * potentially racing lastclose.
 * Returns:
 * -EINVAL: invalid size or memory type.
 * -ENOMEM: Not enough memory.
 * May also return driver-specified errors.
 */

extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                          unsigned long p_offset, unsigned long p_size);

/**
 * ttm_bo_clean_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Take down a manager for a given memory type after first walking
 * the LRU list to evict any buffers left alive.
 *
 * Normally, this function is part of lastclose() or unload(), and at that
 * point there shouldn't be any buffers left created by user-space, since
 * they should have been removed by the file descriptor release() method.
 * However, before this function is run, make sure to signal all sync objects,
 * and verify that the delayed delete queue is empty. The driver must also
 * make sure that there are no NO_EVICT buffers present in this memory type
 * when the call is made.
 *
 * If this function is part of a VT switch, the caller must make sure that
 * there are no applications currently validating buffers before this
 * function is called. The caller can do that by first taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: invalid or uninitialized memory type.
 * -EBUSY: There are still buffers left in this memory type.
 */

extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_bo_evict_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Evicts all buffers on the lru list of the memory type.
 * This is normally part of a VT switch or an
 * out-of-memory-space-due-to-fragmentation handler.
 * The caller must make sure that there are no other processes
 * currently validating buffers, and can do that by taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: Invalid or uninitialized memory type.
 * -ERESTART: The call was interrupted by a signal while waiting to
 * evict a buffer.
 */

extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return indicates whether the
 * virtual map is io memory or normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should strictly be accessed using the iowriteXX() and similar
 * functions.
 */

static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
                                         bool *is_iomem)
{
        *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
                     map->bo_kmap_type == ttm_bo_map_premapped);
        return map->virtual;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */

extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
                       unsigned long num_pages, struct ttm_bo_kmap_obj *map);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */

extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
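
/*
 * Illustrative sketch, not in the original patch: CPU access to the first
 * page of a bo via ttm_bo_kmap()/ttm_bo_kunmap(), honouring the is_iomem
 * result from ttm_kmap_obj_virtual() as required above. foo_clear_first_page()
 * is a hypothetical helper.
 */
static int foo_clear_first_page(struct ttm_buffer_object *bo)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *virtual;
        int ret;

        ret = ttm_bo_kmap(bo, 0, 1, &map);      /* map page 0 only */
        if (unlikely(ret != 0))
                return ret;

        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
        if (is_iomem)
                memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
        else
                memset(virtual, 0, PAGE_SIZE);

        ttm_bo_kunmap(&map);
        return 0;
}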

/**
 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space. The address space will
 * have the same size as the bo, and start at offset 0.
 *
 * This function is intended to be called by the fbdev mmap method
 * if the fbdev address space is to be backed by a bo.
 */

extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
                          struct ttm_buffer_object *bo);

/**
 * ttm_bo_mmap - mmap out of the ttm device address space.
 *
 * @filp: filp as input from the mmap method.
 * @vma: vma as input from the mmap method.
 * @bdev: Pointer to the ttm_bo_device with the address space manager.
 *
 * This function is intended to be called by the device mmap method
 * if the device address space is to be backed by the bo manager.
 */

extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                       struct ttm_bo_device *bdev);

/**
 * ttm_bo_io
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @filp: Pointer to the struct file attempting to read / write.
 * @wbuf: User-space pointer to address of buffer to write. NULL on read.
 * @rbuf: User-space pointer to address of buffer to read into.
 * NULL on write.
 * @count: Number of bytes to read / write.
 * @f_pos: Pointer to current file position.
 * @write: 1 for write, 0 for read.
 *
 * This function implements read / write into ttm buffer objects, and is
 * intended to be called from the fops::read and fops::write methods.
 * Returns:
 * See man (2) write, man (2) read. In particular, the function may
 * return -EINTR if interrupted by a signal.
 */

extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                         const char __user *wbuf, char __user *rbuf,
                         size_t count, loff_t *f_pos, bool write);

extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);

#endif

@ -0,0 +1,867 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "drm_mm.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
#include "linux/spinlock.h"

struct ttm_backend;

struct ttm_backend_func {
        /**
         * struct ttm_backend_func member populate
         *
         * @backend: Pointer to a struct ttm_backend.
         * @num_pages: Number of pages to populate.
         * @pages: Array of pointers to ttm pages.
         * @dummy_read_page: Page to be used instead of NULL pages in the
         * array @pages.
         *
         * Populate the backend with ttm pages. Depending on the backend,
         * it may or may not copy the @pages array.
         */
        int (*populate) (struct ttm_backend *backend,
                         unsigned long num_pages, struct page **pages,
                         struct page *dummy_read_page);
        /**
         * struct ttm_backend_func member clear
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * This is an "unpopulate" function. Release all resources
         * allocated with populate.
         */
        void (*clear) (struct ttm_backend *backend);

        /**
         * struct ttm_backend_func member bind
         *
         * @backend: Pointer to a struct ttm_backend.
         * @bo_mem: Pointer to a struct ttm_mem_reg describing the
         * memory type and location for binding.
         *
         * Bind the backend pages into the aperture in the location
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture and system page sizes.
         */
        int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

        /**
         * struct ttm_backend_func member unbind
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * Unbind previously bound backend pages. This function should be
         * able to handle differences between aperture and system page sizes.
         */
        int (*unbind) (struct ttm_backend *backend);

        /**
         * struct ttm_backend_func member destroy
         *
         * @backend: Pointer to a struct ttm_backend.
         *
         * Destroy the backend.
         */
        void (*destroy) (struct ttm_backend *backend);
};

/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 */

struct ttm_backend {
        struct ttm_bo_device *bdev;
        uint32_t flags;
        struct ttm_backend_func *func;
};
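
/*
 * Illustrative sketch, not in the original patch: the shape of a driver
 * backend. A real bind() would program the GPU page tables or the AGP
 * aperture from the pages handed over by populate(); the hypothetical
 * foo_backend below only records state. The remaining ttm_backend_func
 * ops follow the same container_of() pattern.
 */
struct foo_backend {
        struct ttm_backend base;        /* embedded; recovered via container_of() */
        struct page **pages;            /* array handed over by populate() */
        unsigned long num_pages;
        bool bound;
};

static int foo_backend_bind(struct ttm_backend *backend,
                            struct ttm_mem_reg *bo_mem)
{
        struct foo_backend *fbe = container_of(backend, struct foo_backend, base);

        /* Write fbe->pages into the aperture described by bo_mem here. */
        fbe->bound = true;
        return 0;
}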

#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
#define TTM_PAGE_FLAG_USER (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
#define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)

enum ttm_caching_state {
        tt_uncached,
        tt_wc,
        tt_cached
};

/**
 * struct ttm_tt
 *
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
        struct page *dummy_read_page;
        struct page **pages;
        long first_himem_page;
        long last_lomem_page;
        uint32_t page_flags;
        unsigned long num_pages;
        struct ttm_bo_device *bdev;
        struct ttm_backend *be;
        struct task_struct *tsk;
        unsigned long start;
        struct file *swap_storage;
        enum ttm_caching_state caching_state;
        enum {
                tt_bound,
                tt_unbound,
                tt_unpopulated,
        } state;
};

#define TTM_MEMTYPE_FLAG_FIXED (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2)	/* Fixed memory needs ioremap
						   before kernel access. */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3)	/* Can't map aperture */

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @io_offset: The io_offset of the first managed page of IO memory or
 * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
 * memory, this should be set to 0.
 * @io_size: The size of a managed IO region (fixed memory or aperture).
 * @io_addr: Virtual kernel address if the io region is pre-mapped. For
 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
 * @io_addr should be set to NULL.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @manager: The range manager used for this memory type. FIXME: If the aperture
 * has a page size different from the underlying system, the granularity
 * of this manager should take care of this. But the range allocating code
 * in ttm_bo.c needs to be modified for this.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {

        /*
         * No protection. Constant from start.
         */

        bool has_type;
        bool use_type;
        uint32_t flags;
        unsigned long gpu_offset;
        unsigned long io_offset;
        unsigned long io_size;
        void *io_addr;
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;

        /*
         * Protected by the bdev->lru_lock.
         * TODO: Consider one lru_lock per ttm_mem_type_manager.
         * Plays ill with list removal, though.
         */

        struct drm_mm manager;
        struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @mem_type_prio: Priority array of memory types to place a buffer object in
 * if it fits without evicting buffers from any of these memory types.
 * @mem_busy_prio: Priority array of memory types to place a buffer object in
 * if it needs to evict buffers to make room.
 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
 * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
        const uint32_t *mem_type_prio;
        const uint32_t *mem_busy_prio;
        uint32_t num_mem_type_prio;
        uint32_t num_mem_busy_prio;

        /**
         * struct ttm_bo_driver member create_ttm_backend_entry
         *
         * @bdev: The buffer object device.
         *
         * Create a driver specific struct ttm_backend.
         */

        struct ttm_backend *(*create_ttm_backend_entry)
            (struct ttm_bo_device *bdev);

        /**
         * struct ttm_bo_driver member invalidate_caches
         *
         * @bdev: the buffer object device.
         * @flags: new placement of the rebound buffer object.
         *
         * A previously evicted buffer has been rebound in a
         * potentially new location. Tell the driver that it might
         * consider invalidating read (texture) caches on the next command
         * submission as a consequence.
         */

        int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
        int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
                              struct ttm_mem_type_manager *man);

        /**
         * struct ttm_bo_driver member evict_flags:
         *
         * @bo: the buffer object to be evicted
         *
         * Return the bo flags for a buffer which is not mapped to the hardware.
         * These will be placed in proposed_flags so that when the move is
         * finished, they'll end up in bo->mem.flags
         */

        uint32_t (*evict_flags) (struct ttm_buffer_object *bo);

        /**
         * struct ttm_bo_driver member move:
         *
         * @bo: the buffer to move
         * @evict: whether this motion is evicting the buffer from
         * the graphics address space
         * @interruptible: Use interruptible sleeps if possible when sleeping.
         * @no_wait: whether this should give up and return -EBUSY
         * if this move would require sleeping
         * @new_mem: the new memory region receiving the buffer
         *
         * Move a buffer between two memory regions.
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
                     bool no_wait, struct ttm_mem_reg *new_mem);

        /**
         * struct ttm_bo_driver member verify_access
         *
         * @bo: Pointer to a buffer object.
         * @filp: Pointer to a struct file trying to access the object.
         *
         * Called from the map / write / read methods to verify that the
         * caller is permitted to access the buffer object.
         * This member may be set to NULL, which will refuse this kind of
         * access for all buffer objects.
         * This function should return 0 if access is granted, -EPERM otherwise.
         */
        int (*verify_access) (struct ttm_buffer_object *bo,
                              struct file *filp);

        /**
         * In case a driver writer dislikes the TTM fence objects,
         * the driver writer can replace those with sync objects of
         * his / her own. If it turns out that no driver writer is
         * using these, I suggest we remove these hooks and plug in
         * fences directly. The bo driver needs the following functionality:
         * See the corresponding functions in the fence object API
         * documentation.
         */

        bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
        int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
                              bool lazy, bool interruptible);
        int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
        void (*sync_obj_unref) (void **sync_obj);
        void *(*sync_obj_ref) (void *sync_obj);
};
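
/*
 * Illustrative sketch, not in the original patch: a minimal driver vtable.
 * Leaving .move NULL selects the generic memcpy path. The foo_*() callbacks
 * and the TTM_PL_* memory types (assumed from ttm_placement_common.h) stand
 * in for a real driver's implementation.
 */
static const uint32_t foo_mem_prios[] = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };

static struct ttm_bo_driver foo_bo_driver = {
        .mem_type_prio = foo_mem_prios,
        .mem_busy_prio = foo_mem_prios,
        .num_mem_type_prio = ARRAY_SIZE(foo_mem_prios),
        .num_mem_busy_prio = ARRAY_SIZE(foo_mem_prios),
        .create_ttm_backend_entry = foo_create_backend,
        .init_mem_type = foo_init_mem_type,
        .evict_flags = foo_evict_flags,
        .move = NULL,                   /* fall back to ttm_bo_move_memcpy() */
        .verify_access = foo_verify_access,
        .sync_obj_signaled = foo_fence_signaled,
        .sync_obj_wait = foo_fence_wait,
        .sync_obj_flush = foo_fence_flush,
        .sync_obj_unref = foo_fence_unref,
        .sync_obj_ref = foo_fence_ref,
};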

#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING 0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @count: Current number of buffer objects.
 * @pages: Current number of pinned pages.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object, excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @man: An array of mem_type_managers.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
 * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 */

struct ttm_bo_device {

        /*
         * Constant after bo device init / atomic.
         */

        struct ttm_mem_global *mem_glob;
        struct ttm_bo_driver *driver;
        struct page *dummy_read_page;
        struct ttm_mem_shrink shrink;

        size_t ttm_bo_extra_size;
        size_t ttm_bo_size;

        rwlock_t vm_lock;
        /*
         * Protected by the vm lock.
         */
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        struct rb_root addr_space_rb;
        struct drm_mm addr_space_mm;

        /*
         * Might want to change this to one lock per manager.
         */
        spinlock_t lru_lock;
        /*
         * Protected by the lru lock.
         */
        struct list_head ddestroy;
        struct list_head swap_lru;

        /*
         * Protected by load / firstopen / lastclose / unload sync.
         */

        bool nice_mode;
        struct address_space *dev_mapping;

        /*
         * Internal protection.
         */

        struct delayed_work wq;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
        *old ^= (*old ^ new) & mask;
        return *old;
}
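
/*
 * Example (illustrative, not in the original patch): replace only the
 * caching bits of a placement word, leaving the memory-type bits alone.
 * TTM_PL_FLAG_CACHED and TTM_PL_MASK_CACHING are assumed from
 * ttm_placement_common.h.
 */
static inline void foo_force_cached(uint32_t *placement)
{
        ttm_flag_masked(placement, TTM_PL_FLAG_CACHED, TTM_PL_MASK_CACHING);
}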

/**
 * ttm_tt_create
 *
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data that needs backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * NULL: Out of memory.
 */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
                                    unsigned long size,
                                    uint32_t page_flags,
                                    struct page *dummy_read_page);

/**
 * ttm_tt_set_user:
 *
 * @ttm: The struct ttm_tt to populate.
 * @tsk: A struct task_struct for which @start is a valid user-space address.
 * @start: A valid user-space address.
 * @num_pages: Size in pages of the user memory area.
 *
 * Populate a struct ttm_tt with a user-space memory area after first pinning
 * the pages backing it.
 * Returns:
 * !0: Error.
 */

extern int ttm_tt_set_user(struct ttm_tt *ttm,
                           struct task_struct *tsk,
                           unsigned long start, unsigned long num_pages);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy a struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_get_page:
 *
 * @ttm: The struct ttm_tt.
 * @index: Index of the desired page.
 *
 * Return a pointer to the struct page backing @ttm at page
 * index @index. If the page is unpopulated, one will be allocated to
 * populate that index.
 *
 * Returns:
 * NULL on OOM.
 */
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct page:s to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel mappings
 * of the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
                          struct file *persistant_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
                               struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @proposed_placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait: Don't sleep waiting for space to become available.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTART: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                            uint32_t proposed_placement,
                            struct ttm_mem_reg *mem,
                            bool interruptible, bool no_wait);

/**
 * ttm_bo_wait_cpu
 *
 * @bo: Pointer to a struct ttm_buffer_object.
 * @no_wait: Don't sleep while waiting.
 *
 * Wait until a buffer object is no longer sync'ed for CPU access.
 * Returns:
 * -EBUSY: Buffer object was sync'ed for CPU access (only if no_wait == 1).
 * -ERESTART: An interruptible sleep was interrupted by a signal.
 */

extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg describing the buffer object memory.
 * @bus_base: On return the base of the PCI region.
 * @bus_offset: On return the byte offset into the PCI region.
 * @bus_size: On return the byte size of the buffer object or zero if
 * the buffer object memory is not accessible through a PCI region.
 *
 * Returns:
 * -EINVAL if the buffer object is currently not mappable.
 * 0 otherwise.
 */

extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                             struct ttm_mem_reg *mem,
                             unsigned long *bus_base,
                             unsigned long *bus_offset,
                             unsigned long *bus_size);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @mem_glob: A pointer to an initialized struct ttm_mem_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                              struct ttm_mem_global *mem_glob,
                              struct ttm_bo_driver *driver,
                              uint64_t file_page_offset);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * 1) Buffers are reserved with the lru spinlock held. Upon successful
 * reservation they are removed from the lru list. This stops a reserved buffer
 * from being evicted. However the lru spinlock is released between the time
 * a buffer is selected for eviction and the time it is reserved.
 * Therefore a check is made when a buffer is reserved for eviction, that it
 * is still the first buffer in the lru list, before it is removed from the
 * list. @check_lru == 1 forces this check. If it fails, the function returns
 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
 * the procedure.
 * 2) Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_sequence == 1 and @sequence == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EAGAIN error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again (only if use_sequence == 1).
 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_wait_unreserved
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Wait for a struct ttm_buffer_object to become unreserved.
 * This is typically used in the execbuf code to relax cpu-usage when
 * backing off from a potential deadlock condition.
 */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
                                  bool interruptible);
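
/*
 * Illustrative sketch, not in the original patch: the -EAGAIN backoff loop
 * described in the ttm_bo_reserve() comment above, for a single bo out of a
 * larger validation list. foo_reserve_for_validation() is hypothetical;
 * val_seq is the caller's unique validation sequence number.
 */
static int foo_reserve_for_validation(struct ttm_buffer_object *bo,
                                      uint32_t val_seq)
{
        int ret;

        for (;;) {
                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
                if (ret != -EAGAIN)
                        return ret;
                /*
                 * Deadlock danger: drop every reservation we already hold
                 * (elided here), wait for @bo to become free, then retry
                 * with the same sequence number.
                 */
                ret = ttm_bo_wait_unreserved(bo, true);
                if (unlikely(ret != 0))
                        return ret;
        }
}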

/**
 * ttm_bo_block_reservation
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Use interruptible sleep when waiting.
 * @no_wait: Don't sleep, but rather return -EBUSY.
 *
 * Block reservation for validation by simply reserving the buffer.
 * This is intended for single buffer use only without eviction,
 * and thus needs no deadlock protection.
 *
 * Returns:
 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
 * -ERESTART: If interruptible == 1 and the process received a signal
 * while sleeping.
 */
extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
                                    bool interruptible, bool no_wait);

/**
 * ttm_bo_unblock_reservation
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unblocks reservation leaving lru lists untouched.
 */
extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);

/*
 * ttm_bo_util.c
 */

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait: Never sleep, but rather return with -EBUSY.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                           bool evict, bool no_wait,
                           struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait: Never sleep, but rather return with -EBUSY.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                              bool evict,
                              bool no_wait, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait: Never sleep, but rather return with -EBUSY.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     void *sync_obj,
                                     void *sync_obj_arg,
                                     bool evict, bool no_wait,
                                     struct ttm_mem_reg *new_mem);

/**
 * ttm_io_prot
 *
 * @c_state: Caching state.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @c_state.
 */
extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_backend_init
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
                                                struct agp_bridge_data *bridge);
#endif

#endif

@ -0,0 +1,153 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef TTM_MEMORY_H
#define TTM_MEMORY_H

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/errno.h>

/**
 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
 *
 * @do_shrink: The callback function.
 *
 * Arguments to the do_shrink functions are intended to be passed using
 * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
 * and can be accessed using container_of().
 */

struct ttm_mem_shrink {
        int (*do_shrink) (struct ttm_mem_shrink *);
};

/**
 * struct ttm_mem_global - Global memory accounting structure.
 *
 * @shrink: A single callback to shrink TTM memory usage. Extend this
 * to a linked list to be able to handle multiple callbacks when needed.
 * @swap_queue: A workqueue to handle shrinking in low memory situations. We
 * need a separate workqueue since it will spend a lot of time waiting
 * for the GPU, and this would otherwise block other workqueue tasks.
 * At this point we use only a single-threaded workqueue.
 * @work: The workqueue callback for the shrink queue.
 * @queue: Wait queue for processes suspended waiting for memory.
 * @lock: Lock to protect the @shrink callback and the memory accounting
 * members, that is, essentially the whole structure with some exceptions.
 * @emer_memory: Lowmem memory limit available for root.
 * @max_memory: Lowmem memory limit available for non-root.
 * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
 * @used_memory: Currently used lowmem memory.
 * @used_total_memory: Currently used total (lowmem + highmem) memory.
 * @total_memory_swap_limit: Total memory limit where the shrink workqueue
 * kicks in.
 * @max_total_memory: Total memory available to non-root processes.
 * @emer_total_memory: Total memory available to root processes.
 *
 * Note that this structure is not per device. It should be global for all
 * graphics devices.
 */

struct ttm_mem_global {
        struct ttm_mem_shrink *shrink;
        struct workqueue_struct *swap_queue;
        struct work_struct work;
        wait_queue_head_t queue;
        spinlock_t lock;
        uint64_t emer_memory;
        uint64_t max_memory;
        uint64_t swap_limit;
        uint64_t used_memory;
        uint64_t used_total_memory;
        uint64_t total_memory_swap_limit;
        uint64_t max_total_memory;
        uint64_t emer_total_memory;
};
/**
|
||||
* ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
|
||||
*
|
||||
* @shrink: The object to initialize.
|
||||
* @func: The callback function.
|
||||
*/
|
||||
|
||||
static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
|
||||
int (*func) (struct ttm_mem_shrink *))
|
||||
{
|
||||
shrink->do_shrink = func;
|
||||
}
|
||||
|
||||
/**
|
||||
* ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
|
||||
*
|
||||
* @glob: The struct ttm_mem_global object to register with.
|
||||
* @shrink: An initialized struct ttm_mem_shrink object to register.
|
||||
*
|
||||
* Returns:
|
||||
* -EBUSY: There's already a callback registered. (May change).
|
||||
*/
|
||||
|
||||
static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
|
||||
struct ttm_mem_shrink *shrink)
|
||||
{
|
||||
spin_lock(&glob->lock);
|
||||
if (glob->shrink != NULL) {
|
||||
spin_unlock(&glob->lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
glob->shrink = shrink;
|
||||
spin_unlock(&glob->lock);
|
||||
return 0;
|
||||
}

/**
 * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
 *
 * @glob: The struct ttm_mem_global object to unregister from.
 * @shrink: A previously registered struct ttm_mem_shrink object.
 *
 */

static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
                                             struct ttm_mem_shrink *shrink)
{
        spin_lock(&glob->lock);
        BUG_ON(glob->shrink != shrink);
        glob->shrink = NULL;
        spin_unlock(&glob->lock);
}
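
Putting the three helpers together, a client would wire up its shrink callback like this (hypothetical driver code; only ttm_mem_init_shrink, ttm_mem_register_shrink and ttm_mem_unregister_shrink from above are real):

/* Hypothetical shrink-callback lifecycle for a driver "mydrv". */
static int mydrv_mem_shrink(struct ttm_mem_shrink *shrink)
{
        /* Evict or swap out buffers until memory pressure eases. */
        return 0;
}

static struct ttm_mem_shrink mydrv_shrink;

static int mydrv_register(struct ttm_mem_global *glob)
{
        ttm_mem_init_shrink(&mydrv_shrink, mydrv_mem_shrink);
        return ttm_mem_register_shrink(glob, &mydrv_shrink); /* -EBUSY if taken */
}

static void mydrv_unregister(struct ttm_mem_global *glob)
{
        ttm_mem_unregister_shrink(glob, &mydrv_shrink);
}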

extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                                bool no_wait, bool interruptible, bool himem);
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
                                uint64_t amount, bool himem);
extern size_t ttm_round_pot(size_t size);
#endif
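
A typical caller pairs the two accounting entry points around the real allocation, using ttm_round_pot() to account the allocator's rounded-up footprint. A sketch under those assumptions; the helper and its error handling are hypothetical:

/* Hypothetical: account 'size' bytes of lowmem before really allocating. */
static int mydrv_account_alloc(struct ttm_mem_global *glob, size_t size)
{
        uint64_t account = ttm_round_pot(size);
        int ret;

        ret = ttm_mem_global_alloc(glob, account,
                                   false,  /* no_wait */
                                   true,   /* interruptible */
                                   false); /* himem */
        if (ret)
                return ret;

        /* ... do the real allocation; if it fails, give the account back: */
        /* ttm_mem_global_free(glob, account, false); */
        return 0;
}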
@ -0,0 +1,58 @@
/**************************************************************************
 *
 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_MODULE_H_
#define _TTM_MODULE_H_

#include <linux/kernel.h>

#define TTM_PFX "[TTM]"

enum ttm_global_types {
        TTM_GLOBAL_TTM_MEM = 0,
        TTM_GLOBAL_TTM_BO,
        TTM_GLOBAL_TTM_OBJECT,
        TTM_GLOBAL_NUM
};

struct ttm_global_reference {
        enum ttm_global_types global_type;
        size_t size;
        void *object;
        int (*init) (struct ttm_global_reference *);
        void (*release) (struct ttm_global_reference *);
};

extern void ttm_global_init(void);
extern void ttm_global_release(void);
extern int ttm_global_item_ref(struct ttm_global_reference *ref);
extern void ttm_global_item_unref(struct ttm_global_reference *ref);

#endif /* _TTM_MODULE_H_ */
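
A driver that needs, say, the global memory-accounting object declares a ttm_global_reference for it and takes a reference through ttm_global_item_ref(). The sketch below assumes the init/release hooks simply forward to ttm_mem_global_init()/ttm_mem_global_release() from ttm_memory.h; the wrapper names are hypothetical:

static int mydrv_mem_global_init(struct ttm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void mydrv_mem_global_release(struct ttm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static struct ttm_global_reference mem_global_ref = {
        .global_type = TTM_GLOBAL_TTM_MEM,
        .size = sizeof(struct ttm_mem_global),
        .init = &mydrv_mem_global_init,
        .release = &mydrv_mem_global_release,
};

/* ttm_global_item_ref(&mem_global_ref) creates or reuses the shared object;
 * ttm_global_item_unref(&mem_global_ref) drops the reference again. */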
@ -0,0 +1,92 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_PLACEMENT_H_
#define _TTM_PLACEMENT_H_
/*
 * Memory regions for data placement.
 */

#define TTM_PL_SYSTEM           0
#define TTM_PL_TT               1
#define TTM_PL_VRAM             2
#define TTM_PL_PRIV0            3
#define TTM_PL_PRIV1            4
#define TTM_PL_PRIV2            5
#define TTM_PL_PRIV3            6
#define TTM_PL_PRIV4            7
#define TTM_PL_PRIV5            8
#define TTM_PL_SWAPPED          15

#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM         0x0000FFFF

/*
 * Other flags that affect data placement.
 * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
 * if available.
 * TTM_PL_FLAG_SHARED means that another application may
 * reference the buffer.
 * TTM_PL_FLAG_NO_EVICT means that the buffer may never
 * be evicted to make room for other buffers.
 */

#define TTM_PL_FLAG_CACHED      (1 << 16)
#define TTM_PL_FLAG_UNCACHED    (1 << 17)
#define TTM_PL_FLAG_WC          (1 << 18)
#define TTM_PL_FLAG_SHARED      (1 << 20)
#define TTM_PL_FLAG_NO_EVICT    (1 << 21)

#define TTM_PL_MASK_CACHING     (TTM_PL_FLAG_CACHED | \
                                 TTM_PL_FLAG_UNCACHED | \
                                 TTM_PL_FLAG_WC)

#define TTM_PL_MASK_MEMTYPE     (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
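
To illustrate how a placement request composes from these bits (values only; the buffer-object API that consumes them lives elsewhere):

/* Illustrative: a pinned, write-combined VRAM buffer. */
uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT;

/* The masks pick the request apart again: */
uint32_t mem_type = placement & TTM_PL_MASK_MEM;     /* TTM_PL_FLAG_VRAM */
uint32_t caching  = placement & TTM_PL_MASK_CACHING; /* TTM_PL_FLAG_WC */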

/*
 * Access flags to be used for CPU and GPU mappings.
 * The idea is that the TTM synchronization mechanism will
 * allow concurrent read access and exclusive write access.
 * Currently, GPU and CPU accesses are exclusive.
 */

#define TTM_ACCESS_READ         (1 << 0)
#define TTM_ACCESS_WRITE        (1 << 1)

#endif