drm/radeon: remove UMS support

It's been deprecated behind a kconfig option for almost
two years and hasn't really been supported for years before
that.  DDX support was dropped more than three years ago.

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 8333f607a6
parent 80d69009ef
Author: Alex Deucher <alexander.deucher@amd.com>
Date: 2015-11-23 13:13:45 -05:00
14 changed files with 0 additions and 13506 deletions

drivers/gpu/drm/radeon/Kconfig

@@ -5,12 +5,3 @@ config DRM_RADEON_USERPTR
help
This option selects CONFIG_MMU_NOTIFIER if it isn't already
selected to enable full userptr support.
config DRM_RADEON_UMS
bool "Enable userspace modesetting on radeon (DEPRECATED)"
depends on DRM_RADEON
help
Choose this option if you still need userspace modesetting.
Userspace modesetting has been deprecated for quite some time now, so
enable this only if you have ancient versions of the DDX drivers.

drivers/gpu/drm/radeon/Makefile

@@ -58,10 +58,6 @@ $(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
radeon-y := radeon_drv.o
# add UMS driver
radeon-$(CONFIG_DRM_RADEON_UMS) += radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o drm_buffer.o
# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \

drivers/gpu/drm/radeon/drm_buffer.c

@@ -1,177 +0,0 @@
/**************************************************************************
*
* Copyright 2010 Pauli Nieminen.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Multipart buffer for copying data which is larger than the page size.
*
* Authors:
* Pauli Nieminen <suokkos-at-gmail-dot-com>
*/
#include <linux/export.h>
#include "drm_buffer.h"
/**
* Allocate the drm buffer object.
*
* buf: Pointer to a pointer where the object is stored.
* size: The number of bytes to allocate.
*/
int drm_buffer_alloc(struct drm_buffer **buf, int size)
{
int nr_pages = size / PAGE_SIZE + 1;
int idx;
/* Allocating pointer table to end of structure makes drm_buffer
* variable sized */
*buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
GFP_KERNEL);
if (*buf == NULL) {
DRM_ERROR("Failed to allocate drm buffer object to hold"
" %d bytes in %d pages.\n",
size, nr_pages);
return -ENOMEM;
}
(*buf)->size = size;
for (idx = 0; idx < nr_pages; ++idx) {
(*buf)->data[idx] =
kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
GFP_KERNEL);
if ((*buf)->data[idx] == NULL) {
DRM_ERROR("Failed to allocate %dth page for drm"
" buffer with %d bytes and %d pages.\n",
idx + 1, size, nr_pages);
goto error_out;
}
}
return 0;
error_out:
for (; idx >= 0; --idx)
kfree((*buf)->data[idx]);
kfree(*buf);
return -ENOMEM;
}
/**
* Copy the user data to the beginning of the buffer and reset the processing
* iterator.
*
* user_data: A pointer to the data that is copied to the buffer.
* size: The number of bytes to copy.
*/
int drm_buffer_copy_from_user(struct drm_buffer *buf,
void __user *user_data, int size)
{
int nr_pages = size / PAGE_SIZE + 1;
int idx;
if (size > buf->size) {
DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
" %d bytes space\n",
size, buf->size);
return -EFAULT;
}
for (idx = 0; idx < nr_pages; ++idx) {
if (copy_from_user(buf->data[idx],
user_data + idx * PAGE_SIZE,
min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
DRM_ERROR("Failed to copy user data (%p) to drm buffer"
" (%p) %dth page.\n",
user_data, buf, idx);
return -EFAULT;
}
}
buf->iterator = 0;
return 0;
}
/**
* Free the drm buffer object
*/
void drm_buffer_free(struct drm_buffer *buf)
{
if (buf != NULL) {
int nr_pages = buf->size / PAGE_SIZE + 1;
int idx;
for (idx = 0; idx < nr_pages; ++idx)
kfree(buf->data[idx]);
kfree(buf);
}
}
/**
* Read an object from a buffer that may be split across multiple pages. If the
* object is not split, the function simply returns a pointer to the object in
* the buffer. In case of a split object, the data is copied to the stack
* object supplied by the caller.
*
* The processing location of the buffer is also advanced to the next byte
* after the object.
*
* objsize: The size of the object in bytes.
* stack_obj: A pointer to a memory location where object can be copied.
*/
void *drm_buffer_read_object(struct drm_buffer *buf,
int objsize, void *stack_obj)
{
int idx = drm_buffer_index(buf);
int page = drm_buffer_page(buf);
void *obj = NULL;
if (idx + objsize <= PAGE_SIZE) {
obj = &buf->data[page][idx];
} else {
/* The object is split which forces copy to temporary object.*/
int beginsz = PAGE_SIZE - idx;
memcpy(stack_obj, &buf->data[page][idx], beginsz);
memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
objsize - beginsz);
obj = stack_obj;
}
drm_buffer_advance(buf, objsize);
return obj;
}

drivers/gpu/drm/radeon/drm_buffer.h

@@ -1,148 +0,0 @@
/**************************************************************************
*
* Copyright 2010 Pauli Nieminen.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Multipart buffer for copying data which is larger than the page size.
*
* Authors:
* Pauli Nieminen <suokkos-at-gmail-dot-com>
*/
#ifndef _DRM_BUFFER_H_
#define _DRM_BUFFER_H_
#include <drm/drmP.h>
struct drm_buffer {
int iterator;
int size;
char *data[];
};
/**
* Return the index of page that buffer is currently pointing at.
*/
static inline int drm_buffer_page(struct drm_buffer *buf)
{
return buf->iterator / PAGE_SIZE;
}
/**
* Return the index of the current byte in the page
*/
static inline int drm_buffer_index(struct drm_buffer *buf)
{
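/* PAGE_SIZE is a power of two, so masking with PAGE_SIZE - 1 is a cheap iterator % PAGE_SIZE */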
return buf->iterator & (PAGE_SIZE - 1);
}
/**
* Return number of bytes that is left to process
*/
static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
{
return buf->size - buf->iterator;
}
/**
* Advance the buffer iterator number of bytes that is given.
*/
static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
{
buf->iterator += bytes;
}
/**
* Allocate the drm buffer object.
*
* buf: A pointer to a pointer where the object is stored.
* size: The number of bytes to allocate.
*/
extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
/**
* Copy the user data to the beginning of the buffer and reset the processing
* iterator.
*
* user_data: A pointer to the data that is copied to the buffer.
* size: The number of bytes to copy.
*/
extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
void __user *user_data, int size);
/**
* Free the drm buffer object
*/
extern void drm_buffer_free(struct drm_buffer *buf);
/**
* Read an object from a buffer that may be split across multiple pages. If the
* object is not split, the function simply returns a pointer to the object in
* the buffer. In case of a split object, the data is copied to the stack
* object supplied by the caller.
*
* The processing location of the buffer is also advanced to the next byte
* after the object.
*
* objsize: The size of the object in bytes.
* stack_obj: A pointer to a memory location where object can be copied.
*/
extern void *drm_buffer_read_object(struct drm_buffer *buf,
int objsize, void *stack_obj);
/**
* Returns the pointer to the dword which is offset number of elements from the
* current processing location.
*
* The caller must make sure that the dword is not split in the buffer. This
* requirement is easily met if all the sizes of objects in the buffer are
* multiples of a dword and PAGE_SIZE is a multiple of a dword.
*
* Call to this function doesn't change the processing location.
*
* offset: The index of the dword relative to the internal iterator.
*/
static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
int offset)
{
int iter = buffer->iterator + offset * 4;
return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
}
/**
* Returns the pointer to the byte which is offset number of elements from
* the current processing location.
*
* Call to this function doesn't change the processing location.
*
* offset: The index of the byte relative to the internal iterator.
*/
static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
int offset)
{
int iter = buffer->iterator + offset;
return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
}
#endif
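
For context, a minimal sketch of how the UMS command-stream parsers consumed this API, loosely modeled on r300_cmdbuf.c (the function name and the dispatch step are illustrative, not from the tree):

static int example_parse_cmdbuf(void __user *user_data, int size)
{
	struct drm_buffer *buf;
	int ret;

	ret = drm_buffer_alloc(&buf, size);
	if (ret)
		return ret;

	/* one copy in from userspace, then walk the buffer in place */
	ret = drm_buffer_copy_from_user(buf, user_data, size);
	if (ret)
		goto out;

	while (drm_buffer_unprocessed(buf) >= 4) {
		u32 stack_copy;
		/* read_object copes with a dword straddling a page boundary */
		u32 header = *(u32 *)drm_buffer_read_object(buf, 4, &stack_copy);

		/* ... decode the packet header and advance accordingly ... */
		(void)header;
	}
out:
	drm_buffer_free(buf);
	return ret;
}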

File diff suppressed because it is too large

drivers/gpu/drm/radeon/r600_blit.c

@@ -1,874 +0,0 @@
/*
* Copyright 2009 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
*
* ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_drv.h"
#include "r600_blit_shaders.h"
/* 23 bits of float fractional data */
#define I2F_FRAC_BITS 23
#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
/*
* Converts unsigned integer into 32-bit IEEE floating point representation.
* Will be exact from 0 to 2^24. Above that, we round towards zero
* as the fractional bits will not fit in a float. (It would be better to
* round towards even as the fpu does, but that is slower.)
*/
static __pure uint32_t int2float(uint32_t x)
{
uint32_t msb, exponent, fraction;
/* Zero is special */
if (!x) return 0;
/* Get location of the most significant bit */
msb = __fls(x);
/*
* Use a rotate instead of a shift because that works both leftwards
* and rightwards due to the mod(32) behaviour. This means we don't
* need to check to see if we are above 2^24 or not.
*/
fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
exponent = (127 + msb) << I2F_FRAC_BITS;
return fraction + exponent;
}
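
A quick way to convince yourself the rotate trick is right: the userspace re-implementation below reproduces the expected IEEE-754 encodings (ror32() and __fls() are kernel helpers, so local stand-ins are provided; illustrative only, not part of the driver):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t w, unsigned int s)
{
	return (w >> s) | (w << ((32 - s) & 31));
}

static uint32_t msb_index(uint32_t x)	/* stand-in for __fls() */
{
	uint32_t m = 0;
	while (x >>= 1)
		m++;
	return m;
}

static uint32_t i2f(uint32_t x)		/* same formula as int2float() above */
{
	uint32_t msb;

	if (!x)
		return 0;
	msb = msb_index(x);
	return ((127 + msb) << 23) + (ror32(x, (msb - 23) & 0x1f) & ((1u << 23) - 1));
}

int main(void)
{
	assert(i2f(1) == 0x3f800000);		/* 1.0f */
	assert(i2f(3) == 0x40400000);		/* 3.0f */
	assert(i2f(1u << 24) == 0x4b800000);	/* 2^24: still exactly representable */
	printf("int2float matches the IEEE-754 encodings\n");
	return 0;
}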
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
#define FMT_8 0x1
#define FMT_5_6_5 0x8
#define FMT_8_8_8_8 0x1a
#define COLOR_8 0x1
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
static void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
u32 cb_color_info;
int pitch, slice;
RING_LOCALS;
DRM_DEBUG("\n");
h = ALIGN(h, 8);
if (h < 8)
h = 8;
cb_color_info = ((format << 2) | (1 << 27));
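/* CB_COLOR0_SIZE takes the pitch in 8-pixel units and the slice in 64-pixel units, each minus one */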
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
BEGIN_RING(21 + 2);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(gpu_addr >> 8);
OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
OUT_RING(2 << 0);
} else {
BEGIN_RING(21);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(gpu_addr >> 8);
}
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING((pitch << 0) | (slice << 10));
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(0);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(cb_color_info);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(0);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(0);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(0);
ADVANCE_RING();
}
static void
cp_set_surface_sync(drm_radeon_private_t *dev_priv,
u32 sync_type, u32 size, u64 mc_addr)
{
u32 cp_coher_size;
RING_LOCALS;
DRM_DEBUG("\n");
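/* CP_COHER_SIZE is in 256-byte units; 0xffffffff requests a flush of the full address range */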
if (size == 0xffffffff)
cp_coher_size = 0xffffffff;
else
cp_coher_size = ((size + 255) >> 8);
BEGIN_RING(5);
OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
OUT_RING(sync_type);
OUT_RING(cp_coher_size);
OUT_RING((mc_addr >> 8));
OUT_RING(10); /* poll interval */
ADVANCE_RING();
}
static void
set_shaders(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
u64 gpu_addr;
int i;
u32 *vs, *ps;
uint32_t sq_pgm_resources;
RING_LOCALS;
DRM_DEBUG("\n");
/* load shaders */
vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
for (i = 0; i < r6xx_vs_size; i++)
vs[i] = cpu_to_le32(r6xx_vs[i]);
for (i = 0; i < r6xx_ps_size; i++)
ps[i] = cpu_to_le32(r6xx_ps[i]);
dev_priv->blit_vb->used = 512;
gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;
/* setup shader regs */
sq_pgm_resources = (1 << 0);
BEGIN_RING(9 + 12);
/* VS */
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(gpu_addr >> 8);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(sq_pgm_resources);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(0);
/* PS */
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING((gpu_addr + 256) >> 8);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(sq_pgm_resources | (1 << 28));
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(2);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING(0);
ADVANCE_RING();
cp_set_surface_sync(dev_priv,
R600_SH_ACTION_ENA, 512, gpu_addr);
}
static void
set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
{
uint32_t sq_vtx_constant_word2;
RING_LOCALS;
DRM_DEBUG("\n");
sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
#ifdef __BIG_ENDIAN
sq_vtx_constant_word2 |= (2 << 30);
#endif
BEGIN_RING(9);
OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
OUT_RING(0x460);
OUT_RING(gpu_addr & 0xffffffff);
OUT_RING(48 - 1);
OUT_RING(sq_vtx_constant_word2);
OUT_RING(1 << 0);
OUT_RING(0);
OUT_RING(0);
OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
ADVANCE_RING();
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
cp_set_surface_sync(dev_priv,
R600_TC_ACTION_ENA, 48, gpu_addr);
else
cp_set_surface_sync(dev_priv,
R600_VC_ACTION_ENA, 48, gpu_addr);
}
static void
set_tex_resource(drm_radeon_private_t *dev_priv,
int format, int w, int h, int pitch, u64 gpu_addr)
{
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
RING_LOCALS;
DRM_DEBUG("\n");
if (h < 1)
h = 1;
sq_tex_resource_word0 = (1 << 0);
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
((w - 1) << 19));
sq_tex_resource_word1 = (format << 26);
sq_tex_resource_word1 |= ((h - 1) << 0);
sq_tex_resource_word4 = ((1 << 14) |
(0 << 16) |
(1 << 19) |
(2 << 22) |
(3 << 25));
BEGIN_RING(9);
OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
OUT_RING(0);
OUT_RING(sq_tex_resource_word0);
OUT_RING(sq_tex_resource_word1);
OUT_RING(gpu_addr >> 8);
OUT_RING(gpu_addr >> 8);
OUT_RING(sq_tex_resource_word4);
OUT_RING(0);
OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
ADVANCE_RING();
}
static void
set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
{
RING_LOCALS;
DRM_DEBUG("\n");
BEGIN_RING(12);
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING((x1 << 0) | (y1 << 16));
OUT_RING((x2 << 0) | (y2 << 16));
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
OUT_RING((x2 << 0) | (y2 << 16));
OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
OUT_RING((x2 << 0) | (y2 << 16));
ADVANCE_RING();
}
static void
draw_auto(drm_radeon_private_t *dev_priv)
{
RING_LOCALS;
DRM_DEBUG("\n");
BEGIN_RING(10);
OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
OUT_RING(DI_PT_RECTLIST);
OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
#ifdef __BIG_ENDIAN
OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
#else
OUT_RING(DI_INDEX_SIZE_16_BIT);
#endif
OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
OUT_RING(1);
OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
OUT_RING(3);
OUT_RING(DI_SRC_SEL_AUTO_INDEX);
ADVANCE_RING();
COMMIT_RING();
}
static void
set_default_state(drm_radeon_private_t *dev_priv)
{
int i;
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
RING_LOCALS;
switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
case CHIP_R600:
num_ps_gprs = 192;
num_vs_gprs = 56;
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
num_ps_threads = 136;
num_vs_threads = 48;
num_gs_threads = 4;
num_es_threads = 4;
num_ps_stack_entries = 128;
num_vs_stack_entries = 128;
num_gs_stack_entries = 0;
num_es_stack_entries = 0;
break;
case CHIP_RV630:
case CHIP_RV635:
num_ps_gprs = 84;
num_vs_gprs = 36;
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
num_ps_threads = 144;
num_vs_threads = 40;
num_gs_threads = 4;
num_es_threads = 4;
num_ps_stack_entries = 40;
num_vs_stack_entries = 40;
num_gs_stack_entries = 32;
num_es_stack_entries = 16;
break;
case CHIP_RV610:
case CHIP_RV620:
case CHIP_RS780:
case CHIP_RS880:
default:
num_ps_gprs = 84;
num_vs_gprs = 36;
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
num_ps_threads = 136;
num_vs_threads = 48;
num_gs_threads = 4;
num_es_threads = 4;
num_ps_stack_entries = 40;
num_vs_stack_entries = 40;
num_gs_stack_entries = 32;
num_es_stack_entries = 16;
break;
case CHIP_RV670:
num_ps_gprs = 144;
num_vs_gprs = 40;
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
num_ps_threads = 136;
num_vs_threads = 48;
num_gs_threads = 4;
num_es_threads = 4;
num_ps_stack_entries = 40;
num_vs_stack_entries = 40;
num_gs_stack_entries = 32;
num_es_stack_entries = 16;
break;
case CHIP_RV770:
num_ps_gprs = 192;
num_vs_gprs = 56;
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
num_ps_threads = 188;
num_vs_threads = 60;
num_gs_threads = 0;
num_es_threads = 0;
num_ps_stack_entries = 256;
num_vs_stack_entries = 256;
num_gs_stack_entries = 0;
num_es_stack_entries = 0;
break;
case CHIP_RV730:
case CHIP_RV740:
num_ps_gprs = 84;
num_vs_gprs = 36;
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
num_ps_threads = 188;
num_vs_threads = 60;
num_gs_threads = 0;
num_es_threads = 0;
num_ps_stack_entries = 128;
num_vs_stack_entries = 128;
num_gs_stack_entries = 0;
num_es_stack_entries = 0;
break;
case CHIP_RV710:
num_ps_gprs = 192;
num_vs_gprs = 56;
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
num_ps_threads = 144;
num_vs_threads = 48;
num_gs_threads = 0;
num_es_threads = 0;
num_ps_stack_entries = 128;
num_vs_stack_entries = 128;
num_gs_stack_entries = 0;
num_es_stack_entries = 0;
break;
}
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
sq_config = 0;
else
sq_config = R600_VC_ENABLE;
sq_config |= (R600_DX9_CONSTS |
R600_ALU_INST_PREFER_VECTOR |
R600_PS_PRIO(0) |
R600_VS_PRIO(1) |
R600_GS_PRIO(2) |
R600_ES_PRIO(3));
sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
R600_NUM_VS_GPRS(num_vs_gprs) |
R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
R600_NUM_ES_GPRS(num_es_gprs));
sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
R600_NUM_VS_THREADS(num_vs_threads) |
R600_NUM_GS_THREADS(num_gs_threads) |
R600_NUM_ES_THREADS(num_es_threads));
sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
BEGIN_RING(r7xx_default_size + 10);
for (i = 0; i < r7xx_default_size; i++)
OUT_RING(r7xx_default_state[i]);
} else {
BEGIN_RING(r6xx_default_size + 10);
for (i = 0; i < r6xx_default_size; i++)
OUT_RING(r6xx_default_state[i]);
}
OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
/* SQ config */
OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
OUT_RING(sq_config);
OUT_RING(sq_gpr_resource_mgmt_1);
OUT_RING(sq_gpr_resource_mgmt_2);
OUT_RING(sq_thread_resource_mgmt);
OUT_RING(sq_stack_resource_mgmt_1);
OUT_RING(sq_stack_resource_mgmt_2);
ADVANCE_RING();
}
static int r600_nomm_get_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
dev_priv->blit_vb = radeon_freelist_get(dev);
if (!dev_priv->blit_vb) {
DRM_ERROR("Unable to allocate vertex buffer for blit\n");
return -EAGAIN;
}
return 0;
}
static void r600_nomm_put_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
dev_priv->blit_vb->used = 0;
radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
}
static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
return (((char *)dev->agp_buffer_map->handle +
dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
}
int
r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int ret;
DRM_DEBUG("\n");
ret = r600_nomm_get_vb(dev);
if (ret)
return ret;
dev_priv->blit_vb->file_priv = file_priv;
set_default_state(dev_priv);
set_shaders(dev);
return 0;
}
void
r600_done_blit_copy(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("\n");
BEGIN_RING(5);
OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
/* wait for 3D idle clean */
OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
COMMIT_RING();
r600_nomm_put_vb(dev);
}
void
r600_blit_copy(struct drm_device *dev,
uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
int size_bytes)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int max_bytes;
u64 vb_addr;
u32 *vb;
vb = r600_nomm_get_vb_ptr(dev);
if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
max_bytes = 8192;
while (size_bytes) {
int cur_size = size_bytes;
int src_x = src_gpu_addr & 255;
int dst_x = dst_gpu_addr & 255;
int h = 1;
src_gpu_addr = src_gpu_addr & ~255;
dst_gpu_addr = dst_gpu_addr & ~255;
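/* split each address into a 256-byte aligned base plus a byte offset (src_x/dst_x) within the first row */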
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
r600_nomm_put_vb(dev);
r600_nomm_get_vb(dev);
if (!dev_priv->blit_vb)
return;
set_shaders(dev);
vb = r600_nomm_get_vb_ptr(dev);
}
vb[0] = int2float(dst_x);
vb[1] = 0;
vb[2] = int2float(src_x);
vb[3] = 0;
vb[4] = int2float(dst_x);
vb[5] = int2float(h);
vb[6] = int2float(src_x);
vb[7] = int2float(h);
vb[8] = int2float(dst_x + cur_size);
vb[9] = int2float(h);
vb[10] = int2float(src_x + cur_size);
vb[11] = int2float(h);
/* src */
set_tex_resource(dev_priv, FMT_8,
src_x + cur_size, h, src_x + cur_size,
src_gpu_addr);
cp_set_surface_sync(dev_priv,
R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
/* dst */
set_render_target(dev_priv, COLOR_8,
dst_x + cur_size, h,
dst_gpu_addr);
/* scissors */
set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);
/* Vertex buffer setup */
vb_addr = dev_priv->gart_buffers_offset +
dev_priv->blit_vb->offset +
dev_priv->blit_vb->used;
set_vtx_resource(dev_priv, vb_addr);
/* draw */
draw_auto(dev_priv);
cp_set_surface_sync(dev_priv,
R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
vb += 12;
dev_priv->blit_vb->used += 12 * 4;
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
} else {
max_bytes = 8192 * 4;
while (size_bytes) {
int cur_size = size_bytes;
int src_x = (src_gpu_addr & 255);
int dst_x = (dst_gpu_addr & 255);
int h = 1;
src_gpu_addr = src_gpu_addr & ~255;
dst_gpu_addr = dst_gpu_addr & ~255;
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
r600_nomm_put_vb(dev);
r600_nomm_get_vb(dev);
if (!dev_priv->blit_vb)
return;
set_shaders(dev);
vb = r600_nomm_get_vb_ptr(dev);
}
vb[0] = int2float(dst_x / 4);
vb[1] = 0;
vb[2] = int2float(src_x / 4);
vb[3] = 0;
vb[4] = int2float(dst_x / 4);
vb[5] = int2float(h);
vb[6] = int2float(src_x / 4);
vb[7] = int2float(h);
vb[8] = int2float((dst_x + cur_size) / 4);
vb[9] = int2float(h);
vb[10] = int2float((src_x + cur_size) / 4);
vb[11] = int2float(h);
/* src */
set_tex_resource(dev_priv, FMT_8_8_8_8,
(src_x + cur_size) / 4,
h, (src_x + cur_size) / 4,
src_gpu_addr);
cp_set_surface_sync(dev_priv,
R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
/* dst */
set_render_target(dev_priv, COLOR_8_8_8_8,
(dst_x + cur_size) / 4, h,
dst_gpu_addr);
/* scissors */
set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
/* Vertex buffer setup */
vb_addr = dev_priv->gart_buffers_offset +
dev_priv->blit_vb->offset +
dev_priv->blit_vb->used;
set_vtx_resource(dev_priv, vb_addr);
/* draw */
draw_auto(dev_priv);
cp_set_surface_sync(dev_priv,
R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
vb += 12;
dev_priv->blit_vb->used += 12 * 4;
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
}
}
void
r600_blit_swap(struct drm_device *dev,
uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
int sx, int sy, int dx, int dy,
int w, int h, int src_pitch, int dst_pitch, int cpp)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int cb_format, tex_format;
int sx2, sy2, dx2, dy2;
u64 vb_addr;
u32 *vb;
if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
r600_nomm_put_vb(dev);
r600_nomm_get_vb(dev);
if (!dev_priv->blit_vb)
return;
set_shaders(dev);
}
vb = r600_nomm_get_vb_ptr(dev);
sx2 = sx + w;
sy2 = sy + h;
dx2 = dx + w;
dy2 = dy + h;
vb[0] = int2float(dx);
vb[1] = int2float(dy);
vb[2] = int2float(sx);
vb[3] = int2float(sy);
vb[4] = int2float(dx);
vb[5] = int2float(dy2);
vb[6] = int2float(sx);
vb[7] = int2float(sy2);
vb[8] = int2float(dx2);
vb[9] = int2float(dy2);
vb[10] = int2float(sx2);
vb[11] = int2float(sy2);
switch(cpp) {
case 4:
cb_format = COLOR_8_8_8_8;
tex_format = FMT_8_8_8_8;
break;
case 2:
cb_format = COLOR_5_6_5;
tex_format = FMT_5_6_5;
break;
default:
cb_format = COLOR_8;
tex_format = FMT_8;
break;
}
/* src */
set_tex_resource(dev_priv, tex_format,
src_pitch / cpp,
sy2, src_pitch / cpp,
src_gpu_addr);
cp_set_surface_sync(dev_priv,
R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);
/* dst */
set_render_target(dev_priv, cb_format,
dst_pitch / cpp, dy2,
dst_gpu_addr);
/* scissors */
set_scissors(dev_priv, dx, dy, dx2, dy2);
/* Vertex buffer setup */
vb_addr = dev_priv->gart_buffers_offset +
dev_priv->blit_vb->offset +
dev_priv->blit_vb->used;
set_vtx_resource(dev_priv, vb_addr);
/* draw */
draw_auto(dev_priv);
cp_set_surface_sync(dev_priv,
R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
dst_pitch * dy2, dst_gpu_addr);
dev_priv->blit_vb->used += 12 * 4;
}

File diff suppressed because it is too large

drivers/gpu/drm/radeon/r600_cs.c

@@ -2328,101 +2328,6 @@ int r600_cs_parse(struct radeon_cs_parser *p)
return 0;
}
#ifdef CONFIG_DRM_RADEON_UMS
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
*
* If error is set, invalidate the buffer; otherwise just free the memory
* used by the parsing context.
**/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
kfree(parser->relocs);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
kfree(parser->chunks_array);
}
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
if (p->chunk_relocs == NULL) {
return 0;
}
p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
return 0;
}
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
unsigned family, u32 *ib, int *l)
{
struct radeon_cs_parser parser;
struct radeon_cs_chunk *ib_chunk;
struct r600_cs_track *track;
int r;
/* initialize tracker */
track = kzalloc(sizeof(*track), GFP_KERNEL);
if (track == NULL)
return -ENOMEM;
r600_cs_track_init(track);
r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
parser.track = track;
parser.ib.ptr = ib;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
r600_cs_parser_fini(&parser, r);
return r;
}
r = r600_cs_parser_relocs_legacy(&parser);
if (r) {
DRM_ERROR("Failed to parse relocation !\n");
r600_cs_parser_fini(&parser, r);
return r;
}
/* Copy the packet into the IB, the parser will read from the
* input memory (cached) and write to the IB (which can be
* uncached). */
ib_chunk = parser.chunk_ib;
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
r = -EFAULT;
r600_cs_parser_fini(&parser, r);
return r;
}
r = r600_cs_parse(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
r600_cs_parser_fini(&parser, r);
return r;
}
r600_cs_parser_fini(&parser, r);
return r;
}
void r600_cs_legacy_init(void)
{
r600_nomm = 1;
}
#endif
/*
* DMA
*/

File diff suppressed because it is too large

drivers/gpu/drm/radeon/radeon_drv.c

@@ -291,88 +291,6 @@ static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#ifdef CONFIG_DRM_RADEON_UMS
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return 0;
/* Disable *all* interrupts */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
return 0;
}
static int radeon_resume(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return 0;
/* Restore interrupt registers */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
return 0;
}
static const struct file_operations radeon_driver_old_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_compat_ioctl,
#endif
.llseek = noop_llseek,
};
static struct drm_driver driver_old = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
.firstopen = radeon_driver_firstopen,
.open = radeon_driver_open,
.preclose = radeon_driver_preclose,
.postclose = radeon_driver_postclose,
.lastclose = radeon_driver_lastclose,
.set_busid = drm_pci_set_busid,
.unload = radeon_driver_unload,
.suspend = radeon_suspend,
.resume = radeon_resume,
.get_vblank_counter = radeon_get_vblank_counter,
.enable_vblank = radeon_enable_vblank,
.disable_vblank = radeon_disable_vblank,
.master_create = radeon_master_create,
.master_destroy = radeon_master_destroy,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,
.irq_uninstall = radeon_driver_irq_uninstall,
.irq_handler = radeon_driver_irq_handler,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
.fops = &radeon_driver_old_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
#endif
static struct drm_driver kms_driver;
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -619,13 +537,6 @@ static struct drm_driver kms_driver = {
static struct drm_driver *driver;
static struct pci_driver *pdriver;
#ifdef CONFIG_DRM_RADEON_UMS
static struct pci_driver radeon_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
#endif
static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -655,16 +566,8 @@ static int __init radeon_init(void)
radeon_register_atpx_handler();
} else {
#ifdef CONFIG_DRM_RADEON_UMS
DRM_INFO("radeon userspace modesetting enabled.\n");
driver = &driver_old;
pdriver = &radeon_pci_driver;
driver->driver_features &= ~DRIVER_MODESET;
driver->num_ioctls = radeon_max_ioctl;
#else
DRM_ERROR("No UMS support in radeon module!\n");
return -EINVAL;
#endif
}
radeon_kfd_init();

File diff suppressed because it is too large

drivers/gpu/drm/radeon/radeon_irq.c

@@ -1,402 +0,0 @@
/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
/*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Michel Dänzer <michel@daenzer.net>
*
* ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_drv.h"
void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if (state)
dev_priv->irq_enable_reg |= mask;
else
dev_priv->irq_enable_reg &= ~mask;
if (dev->irq_enabled)
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
}
static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if (state)
dev_priv->r500_disp_irq_reg |= mask;
else
dev_priv->r500_disp_irq_reg &= ~mask;
if (dev->irq_enabled)
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
}
int radeon_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
switch (pipe) {
case 0:
r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
break;
case 1:
r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
pipe);
return -EINVAL;
}
} else {
switch (pipe) {
case 0:
radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
break;
case 1:
radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
pipe);
return -EINVAL;
}
}
return 0;
}
void radeon_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
switch (pipe) {
case 0:
r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
break;
case 1:
r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
pipe);
break;
}
} else {
switch (pipe) {
case 0:
radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
break;
case 1:
radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
pipe);
break;
}
}
}
static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
{
u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
u32 irq_mask = RADEON_SW_INT_TEST;
*r500_disp_int = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
/* vbl interrupts in a different place */
if (irqs & R500_DISPLAY_INT_STATUS) {
/* if a display interrupt */
u32 disp_irq;
disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
*r500_disp_int = disp_irq;
if (disp_irq & R500_D1_VBLANK_INTERRUPT)
RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
if (disp_irq & R500_D2_VBLANK_INTERRUPT)
RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
}
irq_mask |= R500_DISPLAY_INT_STATUS;
} else
irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
irqs &= irq_mask;
if (irqs)
RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
return irqs;
}
/* Interrupts - Used for device synchronization and flushing in the
* following circumstances:
*
* - Exclusive FB access with hw idle:
* - Wait for GUI Idle (?) interrupt, then do normal flush.
*
* - Frame throttling, NV_fence:
* - Drop marker irq's into command stream ahead of time.
* - Wait on irq's with lock *not held*
* - Check each for termination condition
*
* - Internally in cp_getbuffer, etc:
* - as above, but wait with lock held???
*
* NOTE: These functions are misleadingly named -- the irq's aren't
* tied to dma at all, this is just a hangover from dri prehistory.
*/
irqreturn_t radeon_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
u32 stat;
u32 r500_disp_int;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return IRQ_NONE;
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
if (!stat)
return IRQ_NONE;
stat &= dev_priv->irq_enable_reg;
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST)
wake_up(&dev_priv->swi_queue);
/* VBLANK interrupt */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
drm_handle_vblank(dev, 0);
if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
drm_handle_vblank(dev, 1);
} else {
if (stat & RADEON_CRTC_VBLANK_STAT)
drm_handle_vblank(dev, 0);
if (stat & RADEON_CRTC2_VBLANK_STAT)
drm_handle_vblank(dev, 1);
}
return IRQ_HANDLED;
}
static int radeon_emit_irq(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int ret;
RING_LOCALS;
atomic_inc(&dev_priv->swi_emitted);
ret = atomic_read(&dev_priv->swi_emitted);
BEGIN_RING(4);
OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
ADVANCE_RING();
COMMIT_RING();
return ret;
}
static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
int ret = 0;
if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
return 0;
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * HZ,
RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
return ret;
}
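
Together, radeon_emit_irq() and radeon_wait_irq() form a simple fence: the CP writes each emitted sequence number back to RADEON_LAST_SWI_REG and fires a software interrupt, so a caller can block until the GPU has consumed all work queued before the marker. A hypothetical in-file caller (sketch, not from the original source):

static int example_sync_to_gpu(struct drm_device *dev)
{
	/* queue a numbered marker plus a SW interrupt behind prior work */
	int seq = radeon_emit_irq(dev);

	/* sleep until the CP has written the marker back (3s timeout) */
	return radeon_wait_irq(dev, seq);
}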
u32 radeon_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (pipe > 1) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
if (pipe == 0)
return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
else
return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
} else {
if (pipe == 0)
return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
else
return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
}
}
/* Needs the lock as it touches the ring.
*/
int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_irq_emit_t *emit = data;
int result;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return -EINVAL;
LOCK_TEST_WITH_RETURN(dev, file_priv);
result = radeon_emit_irq(dev);
if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
return 0;
}
/* Doesn't need the hardware lock.
*/
int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return -EINVAL;
return radeon_wait_irq(dev, irqwait->irq_seq);
}
/* drm_dma.h hooks
*/
void radeon_driver_irq_preinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
u32 dummy;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return;
/* Disable *all* interrupts */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
/* Clear bits if they're already high */
radeon_acknowledge_irqs(dev_priv, &dummy);
}
int radeon_driver_irq_postinstall(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
atomic_set(&dev_priv->swi_emitted, 0);
init_waitqueue_head(&dev_priv->swi_queue);
dev->max_vblank_count = 0x001fffff;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return 0;
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
return 0;
}
void radeon_driver_irq_uninstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
if (!dev_priv)
return;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
return;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
/* Disable *all* interrupts */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}
int radeon_vblank_crtc_get(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
return dev_priv->vblank_crtc;
}
int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
return -EINVAL;
}
dev_priv->vblank_crtc = (unsigned int)value;
return 0;
}

drivers/gpu/drm/radeon/radeon_mem.c

@@ -1,302 +0,0 @@
/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
/*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
*
* ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_drv.h"
/* Very simple allocator for GART memory, working on a static range
* already mapped into each client's address space.
*/
static struct mem_block *split_block(struct mem_block *p, int start, int size,
struct drm_file *file_priv)
{
/* Maybe cut off the start of an existing block */
if (start > p->start) {
struct mem_block *newblock = kmalloc(sizeof(*newblock),
GFP_KERNEL);
if (!newblock)
goto out;
newblock->start = start;
newblock->size = p->size - (start - p->start);
newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
p->next = newblock;
p->size -= newblock->size;
p = newblock;
}
/* Maybe cut off the end of an existing block */
if (size < p->size) {
struct mem_block *newblock = kmalloc(sizeof(*newblock),
GFP_KERNEL);
if (!newblock)
goto out;
newblock->start = start + size;
newblock->size = p->size - size;
newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
p->next = newblock;
p->size = size;
}
out:
/* Our block is in the middle */
p->file_priv = file_priv;
return p;
}
static struct mem_block *alloc_block(struct mem_block *heap, int size,
int align2, struct drm_file *file_priv)
{
struct mem_block *p;
int mask = (1 << align2) - 1;
list_for_each(p, heap) {
int start = (p->start + mask) & ~mask;
if (p->file_priv == NULL && start + size <= p->start + p->size)
return split_block(p, start, size, file_priv);
}
return NULL;
}
static struct mem_block *find_block(struct mem_block *heap, int start)
{
struct mem_block *p;
list_for_each(p, heap)
if (p->start == start)
return p;
return NULL;
}
static void free_block(struct mem_block *p)
{
p->file_priv = NULL;
/* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
if (p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
p->next->prev = p;
kfree(q);
}
if (p->prev->file_priv == NULL) {
struct mem_block *q = p->prev;
q->size += p->size;
q->next = p->next;
q->next->prev = q;
kfree(p);
}
}
/* Initialize. How to check for an uninitialized heap?
*/
static int init_heap(struct mem_block **heap, int start, int size)
{
struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
if (!blocks)
return -ENOMEM;
*heap = kzalloc(sizeof(**heap), GFP_KERNEL);
if (!*heap) {
kfree(blocks);
return -ENOMEM;
}
blocks->start = start;
blocks->size = size;
blocks->file_priv = NULL;
blocks->next = blocks->prev = *heap;
(*heap)->file_priv = (struct drm_file *) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
}
/* Free all blocks associated with the releasing file.
*/
void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
struct mem_block *p;
if (!heap || !heap->next)
return;
list_for_each(p, heap) {
if (p->file_priv == file_priv)
p->file_priv = NULL;
}
/* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
list_for_each(p, heap) {
while (p->file_priv == NULL && p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
p->next->prev = p;
kfree(q);
}
}
}
/* Shutdown.
*/
void radeon_mem_takedown(struct mem_block **heap)
{
struct mem_block *p;
if (!*heap)
return;
for (p = (*heap)->next; p != *heap;) {
struct mem_block *q = p;
p = p->next;
kfree(q);
}
kfree(*heap);
*heap = NULL;
}
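
For reference, a sketch of the allocator lifecycle that the ioctl handlers below drive, expressed as direct calls into the helpers above (illustrative, not from the original file):

static void example_heap_lifecycle(struct drm_file *file_priv)
{
	struct mem_block *heap = NULL, *block;

	/* one free block covering [0, 1 MiB) plus a sentinel head node */
	if (init_heap(&heap, 0, 1024 * 1024))
		return;

	/* 8 KiB block, 4 KiB aligned: align2 is log2 of the alignment */
	block = alloc_block(heap, 8192, 12, file_priv);
	if (block)
		free_block(block);	/* coalesces with free neighbours */

	radeon_mem_takedown(&heap);
}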
/* IOCTL HANDLERS */
static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
{
switch (region) {
case RADEON_MEM_REGION_GART:
return &dev_priv->gart_heap;
case RADEON_MEM_REGION_FB:
return &dev_priv->fb_heap;
default:
return NULL;
}
}
int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_alloc_t *alloc = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
heap = get_heap(dev_priv, alloc->region);
if (!heap || !*heap)
return -EFAULT;
/* Make things easier on ourselves: all allocations at least
* 4k aligned.
*/
if (alloc->alignment < 12)
alloc->alignment = 12;
block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
if (!block)
return -ENOMEM;
if (copy_to_user(alloc->region_offset, &block->start,
sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
return 0;
}
int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_free_t *memfree = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
heap = get_heap(dev_priv, memfree->region);
if (!heap || !*heap)
return -EFAULT;
block = find_block(*heap, memfree->region_offset);
if (!block)
return -EFAULT;
if (block->file_priv != file_priv)
return -EPERM;
free_block(block);
return 0;
}
int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_init_heap_t *initheap = data;
struct mem_block **heap;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
heap = get_heap(dev_priv, initheap->region);
if (!heap)
return -EFAULT;
if (*heap) {
DRM_ERROR("heap already initialized?");
return -EFAULT;
}
return init_heap(heap, initheap->start, initheap->size);
}

File diff suppressed because it is too large