Merge branch 'drm-forlinus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
This commit is contained in:
commit
37ef4399a6
|
@ -3,7 +3,7 @@
|
|||
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
|
||||
|
||||
drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
|
||||
drm_drv.o drm_fops.o drm_init.o drm_ioctl.o drm_irq.o \
|
||||
drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
|
||||
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
|
||||
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
|
||||
drm_sysfs.o
|
||||
|
@ -18,7 +18,7 @@ radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
|
|||
ffb-objs := ffb_drv.o ffb_context.o
|
||||
sis-objs := sis_drv.o sis_ds.o sis_mm.o
|
||||
savage-objs := savage_drv.o savage_bci.o savage_state.o
|
||||
via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
|
||||
via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
|
||||
|
||||
ifeq ($(CONFIG_COMPAT),y)
|
||||
drm-objs += drm_ioc32.o
|
||||
|
|
|
@ -52,7 +52,7 @@
|
|||
# define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */
|
||||
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
|
||||
|
||||
static unsigned long drm_ati_alloc_pcigart_table(void)
|
||||
static void *drm_ati_alloc_pcigart_table(void)
|
||||
{
|
||||
unsigned long address;
|
||||
struct page *page;
|
||||
|
@ -72,27 +72,26 @@ static unsigned long drm_ati_alloc_pcigart_table(void)
|
|||
}
|
||||
|
||||
DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
|
||||
return address;
|
||||
return (void *)address;
|
||||
}
|
||||
|
||||
static void drm_ati_free_pcigart_table(unsigned long address)
|
||||
static void drm_ati_free_pcigart_table(void *address)
|
||||
{
|
||||
struct page *page;
|
||||
int i;
|
||||
DRM_DEBUG("%s\n", __FUNCTION__);
|
||||
|
||||
page = virt_to_page(address);
|
||||
page = virt_to_page((unsigned long)address);
|
||||
|
||||
for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
|
||||
__put_page(page);
|
||||
ClearPageReserved(page);
|
||||
}
|
||||
|
||||
free_pages(address, ATI_PCIGART_TABLE_ORDER);
|
||||
free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
|
||||
}
|
||||
|
||||
int drm_ati_pcigart_cleanup(drm_device_t * dev,
|
||||
drm_ati_pcigart_info * gart_info)
|
||||
int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
|
||||
{
|
||||
drm_sg_mem_t *entry = dev->sg;
|
||||
unsigned long pages;
|
||||
|
@ -136,10 +135,10 @@ int drm_ati_pcigart_cleanup(drm_device_t * dev,
|
|||
|
||||
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
|
||||
|
||||
int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
|
||||
int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
|
||||
{
|
||||
drm_sg_mem_t *entry = dev->sg;
|
||||
unsigned long address = 0;
|
||||
void *address = NULL;
|
||||
unsigned long pages;
|
||||
u32 *pci_gart, page_base, bus_address = 0;
|
||||
int i, j, ret = 0;
|
||||
|
@ -163,7 +162,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
|
|||
goto done;
|
||||
}
|
||||
|
||||
bus_address = pci_map_single(dev->pdev, (void *)address,
|
||||
bus_address = pci_map_single(dev->pdev, address,
|
||||
ATI_PCIGART_TABLE_PAGES *
|
||||
PAGE_SIZE, PCI_DMA_TODEVICE);
|
||||
if (bus_address == 0) {
|
||||
|
@ -176,7 +175,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
|
|||
address = gart_info->addr;
|
||||
bus_address = gart_info->bus_addr;
|
||||
DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
|
||||
bus_address, address);
|
||||
bus_address, (unsigned long)address);
|
||||
}
|
||||
|
||||
pci_gart = (u32 *) address;
|
||||
|
@ -195,7 +194,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
|
|||
if (entry->busaddr[i] == 0) {
|
||||
DRM_ERROR("unable to map PCIGART pages!\n");
|
||||
drm_ati_pcigart_cleanup(dev, gart_info);
|
||||
address = 0;
|
||||
address = NULL;
|
||||
bus_address = 0;
|
||||
goto done;
|
||||
}
|
||||
|
|
|
@ -90,8 +90,8 @@
|
|||
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
|
||||
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
|
||||
|
||||
#define _DRM_LOCK_HELD 0x80000000 /**< Hardware lock is held */
|
||||
#define _DRM_LOCK_CONT 0x40000000 /**< Hardware lock is contended */
|
||||
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
|
||||
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
|
||||
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
|
||||
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
|
||||
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
|
||||
|
|
|
@ -144,20 +144,6 @@
|
|||
/** \name Backward compatibility section */
|
||||
/*@{*/
|
||||
|
||||
#ifndef MODULE_LICENSE
|
||||
#define MODULE_LICENSE(x)
|
||||
#endif
|
||||
|
||||
#ifndef preempt_disable
|
||||
#define preempt_disable()
|
||||
#define preempt_enable()
|
||||
#endif
|
||||
|
||||
#ifndef pte_offset_map
|
||||
#define pte_offset_map pte_offset
|
||||
#define pte_unmap(pte)
|
||||
#endif
|
||||
|
||||
#define DRM_RPR_ARG(vma) vma,
|
||||
|
||||
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
|
||||
|
@ -286,10 +272,13 @@ typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
|
|||
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
|
||||
#define DRM_AUTH 0x1
|
||||
#define DRM_MASTER 0x2
|
||||
#define DRM_ROOT_ONLY 0x4
|
||||
|
||||
typedef struct drm_ioctl_desc {
|
||||
drm_ioctl_t *func;
|
||||
int auth_needed;
|
||||
int root_only;
|
||||
int flags;
|
||||
} drm_ioctl_desc_t;
|
||||
|
||||
typedef struct drm_devstate {
|
||||
|
@ -384,6 +373,7 @@ typedef struct drm_buf_entry {
|
|||
/** File private data */
|
||||
typedef struct drm_file {
|
||||
int authenticated;
|
||||
int master;
|
||||
int minor;
|
||||
pid_t pid;
|
||||
uid_t uid;
|
||||
|
@ -532,8 +522,9 @@ typedef struct drm_vbl_sig {
|
|||
typedef struct ati_pcigart_info {
|
||||
int gart_table_location;
|
||||
int is_pcie;
|
||||
unsigned long addr;
|
||||
void *addr;
|
||||
dma_addr_t bus_addr;
|
||||
drm_local_map_t mapping;
|
||||
} drm_ati_pcigart_info;
|
||||
|
||||
/**
|
||||
|
@ -544,16 +535,14 @@ typedef struct ati_pcigart_info {
|
|||
struct drm_device;
|
||||
|
||||
struct drm_driver {
|
||||
int (*preinit) (struct drm_device *, unsigned long flags);
|
||||
void (*prerelease) (struct drm_device *, struct file * filp);
|
||||
void (*pretakedown) (struct drm_device *);
|
||||
int (*postcleanup) (struct drm_device *);
|
||||
int (*presetup) (struct drm_device *);
|
||||
int (*postsetup) (struct drm_device *);
|
||||
int (*load) (struct drm_device *, unsigned long flags);
|
||||
int (*firstopen) (struct drm_device *);
|
||||
int (*open) (struct drm_device *, drm_file_t *);
|
||||
void (*preclose) (struct drm_device *, struct file * filp);
|
||||
void (*postclose) (struct drm_device *, drm_file_t *);
|
||||
void (*lastclose) (struct drm_device *);
|
||||
int (*unload) (struct drm_device *);
|
||||
int (*dma_ioctl) (DRM_IOCTL_ARGS);
|
||||
int (*open_helper) (struct drm_device *, drm_file_t *);
|
||||
void (*free_filp_priv) (struct drm_device *, drm_file_t *);
|
||||
void (*release) (struct drm_device *, struct file * filp);
|
||||
void (*dma_ready) (struct drm_device *);
|
||||
int (*dma_quiescent) (struct drm_device *);
|
||||
int (*context_ctor) (struct drm_device * dev, int context);
|
||||
|
@ -561,8 +550,9 @@ struct drm_driver {
|
|||
int (*kernel_context_switch) (struct drm_device * dev, int old,
|
||||
int new);
|
||||
void (*kernel_context_switch_unlock) (struct drm_device * dev,
|
||||
drm_lock_t * lock);
|
||||
drm_lock_t *lock);
|
||||
int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
|
||||
int (*dri_library_name) (struct drm_device *dev, char *buf);
|
||||
|
||||
/**
|
||||
* Called by \c drm_device_is_agp. Typically used to determine if a
|
||||
|
@ -579,16 +569,24 @@ struct drm_driver {
|
|||
|
||||
/* these have to be filled in */
|
||||
|
||||
int (*postinit) (struct drm_device *, unsigned long flags);
|
||||
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
|
||||
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
|
||||
void (*irq_preinstall) (struct drm_device * dev);
|
||||
void (*irq_postinstall) (struct drm_device * dev);
|
||||
void (*irq_uninstall) (struct drm_device * dev);
|
||||
void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
|
||||
void (*reclaim_buffers_locked) (struct drm_device *dev,
|
||||
struct file *filp);
|
||||
unsigned long (*get_map_ofs) (drm_map_t * map);
|
||||
unsigned long (*get_reg_ofs) (struct drm_device * dev);
|
||||
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
|
||||
int (*version) (drm_version_t * version);
|
||||
|
||||
int major;
|
||||
int minor;
|
||||
int patchlevel;
|
||||
char *name;
|
||||
char *desc;
|
||||
char *date;
|
||||
|
||||
u32 driver_features;
|
||||
int dev_priv_size;
|
||||
drm_ioctl_desc_t *ioctls;
|
||||
|
@ -752,19 +750,43 @@ static inline int drm_core_has_MTRR(struct drm_device *dev)
|
|||
{
|
||||
return drm_core_check_feature(dev, DRIVER_USE_MTRR);
|
||||
}
|
||||
|
||||
#define DRM_MTRR_WC MTRR_TYPE_WRCOMB
|
||||
|
||||
static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
|
||||
unsigned int flags)
|
||||
{
|
||||
return mtrr_add(offset, size, flags, 1);
|
||||
}
|
||||
|
||||
static inline int drm_mtrr_del(int handle, unsigned long offset,
|
||||
unsigned long size, unsigned int flags)
|
||||
{
|
||||
return mtrr_del(handle, offset, size);
|
||||
}
|
||||
|
||||
#else
|
||||
#define drm_core_has_MTRR(dev) (0)
|
||||
|
||||
#define DRM_MTRR_WC 0
|
||||
|
||||
static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
|
||||
unsigned int flags)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int drm_mtrr_del(int handle, unsigned long offset,
|
||||
unsigned long size, unsigned int flags)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/******************************************************************/
|
||||
/** \name Internal function definitions */
|
||||
/*@{*/
|
||||
|
||||
/* Misc. support (drm_init.h) */
|
||||
extern int drm_flags;
|
||||
extern void drm_parse_options(char *s);
|
||||
extern int drm_cpu_valid(void);
|
||||
|
||||
/* Driver support (drm_drv.h) */
|
||||
extern int drm_init(struct drm_driver *driver);
|
||||
extern void drm_exit(struct drm_driver *driver);
|
||||
|
@ -772,12 +794,11 @@ extern int drm_ioctl(struct inode *inode, struct file *filp,
|
|||
unsigned int cmd, unsigned long arg);
|
||||
extern long drm_compat_ioctl(struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern int drm_takedown(drm_device_t * dev);
|
||||
extern int drm_lastclose(drm_device_t *dev);
|
||||
|
||||
/* Device support (drm_fops.h) */
|
||||
extern int drm_open(struct inode *inode, struct file *filp);
|
||||
extern int drm_stub_open(struct inode *inode, struct file *filp);
|
||||
extern int drm_flush(struct file *filp);
|
||||
extern int drm_fasync(int fd, struct file *filp, int on);
|
||||
extern int drm_release(struct inode *inode, struct file *filp);
|
||||
|
||||
|
@ -819,6 +840,8 @@ extern int drm_getstats(struct inode *inode, struct file *filp,
|
|||
unsigned int cmd, unsigned long arg);
|
||||
extern int drm_setversion(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern int drm_noop(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
|
||||
/* Context IOCTL support (drm_context.h) */
|
||||
extern int drm_resctx(struct inode *inode, struct file *filp,
|
||||
|
@ -857,10 +880,6 @@ extern int drm_getmagic(struct inode *inode, struct file *filp,
|
|||
extern int drm_authmagic(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
|
||||
/* Placeholder for ioctls past */
|
||||
extern int drm_noop(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
|
||||
/* Locking IOCTL support (drm_lock.h) */
|
||||
extern int drm_lock(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
|
@ -873,6 +892,7 @@ extern int drm_lock_free(drm_device_t * dev,
|
|||
/* Buffer management support (drm_bufs.h) */
|
||||
extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
|
||||
extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
|
||||
extern int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request);
|
||||
extern int drm_addmap(drm_device_t * dev, unsigned int offset,
|
||||
unsigned int size, drm_map_type_t type,
|
||||
drm_map_flags_t flags, drm_local_map_t ** map_ptr);
|
||||
|
@ -908,8 +928,8 @@ extern void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp);
|
|||
/* IRQ support (drm_irq.h) */
|
||||
extern int drm_control(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern int drm_irq_uninstall(drm_device_t * dev);
|
||||
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
|
||||
extern int drm_irq_uninstall(drm_device_t * dev);
|
||||
extern void drm_driver_irq_preinstall(drm_device_t * dev);
|
||||
extern void drm_driver_irq_postinstall(drm_device_t * dev);
|
||||
extern void drm_driver_irq_uninstall(drm_device_t * dev);
|
||||
|
@ -933,13 +953,17 @@ extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
|
|||
extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info);
|
||||
extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern int drm_agp_alloc(struct inode *inode, struct file *filp,
|
||||
extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request);
|
||||
extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern int drm_agp_free(struct inode *inode, struct file *filp,
|
||||
extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request);
|
||||
extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern int drm_agp_unbind(struct inode *inode, struct file *filp,
|
||||
extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
|
||||
extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern int drm_agp_bind(struct inode *inode, struct file *filp,
|
||||
extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request);
|
||||
extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
|
||||
size_t pages, u32 type);
|
||||
|
@ -991,10 +1015,8 @@ extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner,
|
|||
char *name);
|
||||
extern void drm_sysfs_destroy(struct drm_sysfs_class *cs);
|
||||
extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
|
||||
dev_t dev,
|
||||
struct device *device,
|
||||
const char *fmt, ...);
|
||||
extern void drm_sysfs_device_remove(dev_t dev);
|
||||
drm_head_t *head);
|
||||
extern void drm_sysfs_device_remove(struct class_device *class_dev);
|
||||
|
||||
/* Inline replacements for DRM_IOREMAP macros */
|
||||
static __inline__ void drm_core_ioremap(struct drm_map *map,
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* \file drm_agpsupport.h
|
||||
* \file drm_agpsupport.c
|
||||
* DRM support for AGP/GART backend
|
||||
*
|
||||
* \author Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
|
@ -91,7 +91,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
|
|||
/**
|
||||
* Acquire the AGP device.
|
||||
*
|
||||
* \param dev DRM device that is to acquire AGP
|
||||
* \param dev DRM device that is to acquire AGP.
|
||||
* \return zero on success or a negative number on failure.
|
||||
*
|
||||
* Verifies the AGP device hasn't been acquired before and calls
|
||||
|
@ -134,7 +134,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
|
|||
/**
|
||||
* Release the AGP device.
|
||||
*
|
||||
* \param dev DRM device that is to release AGP
|
||||
* \param dev DRM device that is to release AGP.
|
||||
* \return zero on success or a negative number on failure.
|
||||
*
|
||||
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
|
||||
|
@ -147,7 +147,6 @@ int drm_agp_release(drm_device_t * dev)
|
|||
dev->agp->acquired = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_agp_release);
|
||||
|
||||
int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
|
||||
|
@ -208,30 +207,22 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
|
|||
* Verifies the AGP device is present and has been acquired, allocates the
|
||||
* memory via alloc_agp() and creates a drm_agp_mem entry for it.
|
||||
*/
|
||||
int drm_agp_alloc(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_agp_buffer_t request;
|
||||
drm_agp_mem_t *entry;
|
||||
DRM_AGP_MEM *memory;
|
||||
unsigned long pages;
|
||||
u32 type;
|
||||
drm_agp_buffer_t __user *argp = (void __user *)arg;
|
||||
|
||||
if (!dev->agp || !dev->agp->acquired)
|
||||
return -EINVAL;
|
||||
if (copy_from_user(&request, argp, sizeof(request)))
|
||||
return -EFAULT;
|
||||
if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
|
||||
return -ENOMEM;
|
||||
|
||||
memset(entry, 0, sizeof(*entry));
|
||||
|
||||
pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
|
||||
type = (u32) request.type;
|
||||
|
||||
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
|
||||
type = (u32) request->type;
|
||||
if (!(memory = drm_alloc_agp(dev, pages, type))) {
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
|
||||
return -ENOMEM;
|
||||
|
@ -247,16 +238,39 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
|
|||
dev->agp->memory->prev = entry;
|
||||
dev->agp->memory = entry;
|
||||
|
||||
request.handle = entry->handle;
|
||||
request.physical = memory->physical;
|
||||
request->handle = entry->handle;
|
||||
request->physical = memory->physical;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_agp_alloc);
|
||||
|
||||
int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_agp_buffer_t request;
|
||||
drm_agp_buffer_t __user *argp = (void __user *)arg;
|
||||
int err;
|
||||
|
||||
if (copy_from_user(&request, argp, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_agp_alloc(dev, &request);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (copy_to_user(argp, &request, sizeof(request))) {
|
||||
drm_agp_mem_t *entry = dev->agp->memory;
|
||||
|
||||
dev->agp->memory = entry->next;
|
||||
dev->agp->memory->prev = NULL;
|
||||
drm_free_agp(memory, pages);
|
||||
drm_free_agp(entry->memory, entry->pages);
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -293,21 +307,14 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
|
|||
* Verifies the AGP device is present and acquired, looks-up the AGP memory
|
||||
* entry and passes it to the unbind_agp() function.
|
||||
*/
|
||||
int drm_agp_unbind(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_agp_binding_t request;
|
||||
drm_agp_mem_t *entry;
|
||||
int ret;
|
||||
|
||||
if (!dev->agp || !dev->agp->acquired)
|
||||
return -EINVAL;
|
||||
if (copy_from_user
|
||||
(&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
|
||||
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
|
||||
return -EINVAL;
|
||||
if (!entry->bound)
|
||||
return -EINVAL;
|
||||
|
@ -316,6 +323,21 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
|
|||
entry->bound = 0;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_agp_unbind);
|
||||
|
||||
int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_agp_binding_t request;
|
||||
|
||||
if (copy_from_user
|
||||
(&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_agp_unbind(dev, &request);
|
||||
}
|
||||
|
||||
/**
|
||||
* Bind AGP memory into the GATT (ioctl)
|
||||
|
@ -330,26 +352,19 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
|
|||
* is currently bound into the GATT. Looks-up the AGP memory entry and passes
|
||||
* it to bind_agp() function.
|
||||
*/
|
||||
int drm_agp_bind(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_agp_binding_t request;
|
||||
drm_agp_mem_t *entry;
|
||||
int retcode;
|
||||
int page;
|
||||
|
||||
if (!dev->agp || !dev->agp->acquired)
|
||||
return -EINVAL;
|
||||
if (copy_from_user
|
||||
(&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
|
||||
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
|
||||
return -EINVAL;
|
||||
if (entry->bound)
|
||||
return -EINVAL;
|
||||
page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
|
||||
page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
|
||||
if ((retcode = drm_bind_agp(entry->memory, page)))
|
||||
return retcode;
|
||||
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
|
||||
|
@ -357,6 +372,21 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
|
|||
dev->agp->base, entry->bound);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_agp_bind);
|
||||
|
||||
int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_agp_binding_t request;
|
||||
|
||||
if (copy_from_user
|
||||
(&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_agp_bind(dev, &request);
|
||||
}
|
||||
|
||||
/**
|
||||
* Free AGP memory (ioctl).
|
||||
|
@ -372,20 +402,13 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
|
|||
* unbind_agp(). Frees it via free_agp() as well as the entry itself
|
||||
* and unlinks from the doubly linked list it's inserted in.
|
||||
*/
|
||||
int drm_agp_free(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_agp_buffer_t request;
|
||||
drm_agp_mem_t *entry;
|
||||
|
||||
if (!dev->agp || !dev->agp->acquired)
|
||||
return -EINVAL;
|
||||
if (copy_from_user
|
||||
(&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
|
||||
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
|
||||
return -EINVAL;
|
||||
if (entry->bound)
|
||||
drm_unbind_agp(entry->memory);
|
||||
|
@ -402,12 +425,30 @@ int drm_agp_free(struct inode *inode, struct file *filp,
|
|||
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_agp_free);
|
||||
|
||||
int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_agp_buffer_t request;
|
||||
|
||||
if (copy_from_user
|
||||
(&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_agp_free(dev, &request);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the AGP resources.
|
||||
*
|
||||
* \return pointer to a drm_agp_head structure.
|
||||
*
|
||||
* Gets the drm_agp_t structure which is made available by the agpgart module
|
||||
* via the inter_module_* functions. Creates and initializes a drm_agp_head
|
||||
* structure.
|
||||
*/
|
||||
drm_agp_head_t *drm_agp_init(drm_device_t * dev)
|
||||
{
|
||||
|
|
|
@ -36,22 +36,21 @@
|
|||
#include <linux/vmalloc.h>
|
||||
#include "drmP.h"
|
||||
|
||||
unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
|
||||
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
|
||||
{
|
||||
return pci_resource_start(dev->pdev, resource);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_get_resource_start);
|
||||
|
||||
unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
|
||||
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
|
||||
{
|
||||
return pci_resource_len(dev->pdev, resource);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_get_resource_len);
|
||||
|
||||
static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
|
||||
drm_local_map_t * map)
|
||||
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
|
||||
drm_local_map_t *map)
|
||||
{
|
||||
struct list_head *list;
|
||||
|
||||
|
@ -74,7 +73,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
|
|||
|
||||
#ifdef _LP64
|
||||
static __inline__ unsigned int HandleID(unsigned long lhandle,
|
||||
drm_device_t * dev)
|
||||
drm_device_t *dev)
|
||||
{
|
||||
static unsigned int map32_handle = START_RANGE;
|
||||
unsigned int hash;
|
||||
|
@ -155,7 +154,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
|
||||
if (map->offset + map->size < map->offset ||
|
||||
if (map->offset + (map->size-1) < map->offset ||
|
||||
map->offset < virt_to_phys(high_memory)) {
|
||||
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
return -EINVAL;
|
||||
|
@ -301,6 +300,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
|
||||
return -EPERM;
|
||||
|
||||
err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
|
||||
&maplist);
|
||||
|
||||
|
@ -332,7 +334,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
|
|||
*
|
||||
* \sa drm_addmap
|
||||
*/
|
||||
int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
|
||||
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
|
||||
{
|
||||
struct list_head *list;
|
||||
drm_map_list_t *r_list = NULL;
|
||||
|
@ -384,10 +386,9 @@ int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_rmmap_locked);
|
||||
|
||||
int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
|
||||
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -397,7 +398,6 @@ int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
|
|||
|
||||
return ret;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_rmmap);
|
||||
|
||||
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
|
||||
|
@ -548,7 +548,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
DRM_DEBUG("count: %d\n", count);
|
||||
DRM_DEBUG("order: %d\n", order);
|
||||
DRM_DEBUG("size: %d\n", size);
|
||||
DRM_DEBUG("agp_offset: %lu\n", agp_offset);
|
||||
DRM_DEBUG("agp_offset: %lx\n", agp_offset);
|
||||
DRM_DEBUG("alignment: %d\n", alignment);
|
||||
DRM_DEBUG("page_order: %d\n", page_order);
|
||||
DRM_DEBUG("total: %d\n", total);
|
||||
|
@ -649,6 +649,8 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
}
|
||||
|
||||
dma->buf_count += entry->buf_count;
|
||||
dma->seg_count += entry->seg_count;
|
||||
dma->page_count += byte_count >> PAGE_SHIFT;
|
||||
dma->byte_count += byte_count;
|
||||
|
||||
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
|
||||
|
@ -664,7 +666,6 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
atomic_dec(&dev->buf_alloc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_addbufs_agp);
|
||||
#endif /* __OS_HAS_AGP */
|
||||
|
||||
|
@ -689,9 +690,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
|
||||
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
|
||||
return -EINVAL;
|
||||
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
count = request->count;
|
||||
order = drm_order(request->size);
|
||||
size = 1 << order;
|
||||
|
@ -882,7 +887,6 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
return 0;
|
||||
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_addbufs_pci);
|
||||
|
||||
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
|
||||
|
@ -908,6 +912,9 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
count = request->count;
|
||||
order = drm_order(request->size);
|
||||
size = 1 << order;
|
||||
|
@ -1026,6 +1033,8 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
}
|
||||
|
||||
dma->buf_count += entry->buf_count;
|
||||
dma->seg_count += entry->seg_count;
|
||||
dma->page_count += byte_count >> PAGE_SHIFT;
|
||||
dma->byte_count += byte_count;
|
||||
|
||||
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
|
||||
|
@ -1042,7 +1051,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
|
||||
int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
|
||||
{
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_entry_t *entry;
|
||||
|
@ -1065,6 +1074,9 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
count = request->count;
|
||||
order = drm_order(request->size);
|
||||
size = 1 << order;
|
||||
|
@ -1181,6 +1193,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
}
|
||||
|
||||
dma->buf_count += entry->buf_count;
|
||||
dma->seg_count += entry->seg_count;
|
||||
dma->page_count += byte_count >> PAGE_SHIFT;
|
||||
dma->byte_count += byte_count;
|
||||
|
||||
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
|
||||
|
@ -1196,6 +1210,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
atomic_dec(&dev->buf_alloc);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_addbufs_fb);
|
||||
|
||||
|
||||
/**
|
||||
* Add buffers for DMA transfers (ioctl).
|
||||
|
@ -1577,5 +1593,6 @@ int drm_order(unsigned long size)
|
|||
|
||||
return order;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_order);
|
||||
|
||||
|
||||
|
|
|
@ -433,7 +433,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
|
|||
if (ctx.handle != DRM_KERNEL_CONTEXT) {
|
||||
if (dev->driver->context_ctor)
|
||||
if (!dev->driver->context_ctor(dev, ctx.handle)) {
|
||||
DRM_DEBUG( "Running out of ctxs or memory.\n");
|
||||
DRM_DEBUG("Running out of ctxs or memory.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,11 +24,11 @@
|
|||
|
||||
#define CORE_NAME "drm"
|
||||
#define CORE_DESC "DRM shared core routines"
|
||||
#define CORE_DATE "20040925"
|
||||
#define CORE_DATE "20051102"
|
||||
|
||||
#define DRM_IF_MAJOR 1
|
||||
#define DRM_IF_MINOR 2
|
||||
|
||||
#define CORE_MAJOR 1
|
||||
#define CORE_MINOR 0
|
||||
#define CORE_PATCHLEVEL 0
|
||||
#define CORE_PATCHLEVEL 1
|
||||
|
|
|
@ -56,66 +56,66 @@ static int drm_version(struct inode *inode, struct file *filp,
|
|||
|
||||
/** Ioctl table */
|
||||
static drm_ioctl_desc_t drm_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, 0, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, 0, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, 1, 0},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH},
|
||||
/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
#endif
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
|
||||
};
|
||||
|
||||
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls )
|
||||
|
@ -129,7 +129,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
|
|||
*
|
||||
* \sa drm_device
|
||||
*/
|
||||
int drm_takedown(drm_device_t * dev)
|
||||
int drm_lastclose(drm_device_t * dev)
|
||||
{
|
||||
drm_magic_entry_t *pt, *next;
|
||||
drm_map_list_t *r_list;
|
||||
|
@ -138,9 +138,9 @@ int drm_takedown(drm_device_t * dev)
|
|||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (dev->driver->pretakedown)
|
||||
dev->driver->pretakedown(dev);
|
||||
DRM_DEBUG("driver pretakedown completed\n");
|
||||
if (dev->driver->lastclose)
|
||||
dev->driver->lastclose(dev);
|
||||
DRM_DEBUG("driver lastclose completed\n");
|
||||
|
||||
if (dev->unique) {
|
||||
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
|
||||
|
@ -233,7 +233,7 @@ int drm_takedown(drm_device_t * dev)
|
|||
}
|
||||
up(&dev->struct_sem);
|
||||
|
||||
DRM_DEBUG("takedown completed\n");
|
||||
DRM_DEBUG("lastclose completed\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -281,7 +281,7 @@ EXPORT_SYMBOL(drm_init);
|
|||
/**
|
||||
* Called via cleanup_module() at module unload time.
|
||||
*
|
||||
* Cleans up all DRM device, calling takedown().
|
||||
* Cleans up all DRM device, calling drm_lastclose().
|
||||
*
|
||||
* \sa drm_init
|
||||
*/
|
||||
|
@ -294,7 +294,7 @@ static void drm_cleanup(drm_device_t * dev)
|
|||
return;
|
||||
}
|
||||
|
||||
drm_takedown(dev);
|
||||
drm_lastclose(dev);
|
||||
|
||||
if (dev->maplist) {
|
||||
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
|
||||
|
@ -317,8 +317,8 @@ static void drm_cleanup(drm_device_t * dev)
|
|||
dev->agp = NULL;
|
||||
}
|
||||
|
||||
if (dev->driver->postcleanup)
|
||||
dev->driver->postcleanup(dev);
|
||||
if (dev->driver->unload)
|
||||
dev->driver->unload(dev);
|
||||
|
||||
drm_put_head(&dev->primary);
|
||||
if (drm_put_dev(dev))
|
||||
|
@ -342,12 +342,12 @@ void drm_exit(struct drm_driver *driver)
|
|||
if (head->dev->driver != driver)
|
||||
continue;
|
||||
dev = head->dev;
|
||||
}
|
||||
if (dev) {
|
||||
/* release the pci driver */
|
||||
if (dev->pdev)
|
||||
pci_dev_put(dev->pdev);
|
||||
drm_cleanup(dev);
|
||||
if (dev) {
|
||||
/* release the pci driver */
|
||||
if (dev->pdev)
|
||||
pci_dev_put(dev->pdev);
|
||||
drm_cleanup(dev);
|
||||
}
|
||||
}
|
||||
DRM_INFO("Module unloaded\n");
|
||||
}
|
||||
|
@ -432,14 +432,17 @@ static int drm_version(struct inode *inode, struct file *filp,
|
|||
drm_device_t *dev = priv->head->dev;
|
||||
drm_version_t __user *argp = (void __user *)arg;
|
||||
drm_version_t version;
|
||||
int ret;
|
||||
int len;
|
||||
|
||||
if (copy_from_user(&version, argp, sizeof(version)))
|
||||
return -EFAULT;
|
||||
|
||||
/* version is a required function to return the personality module version */
|
||||
if ((ret = dev->driver->version(&version)))
|
||||
return ret;
|
||||
version.version_major = dev->driver->major;
|
||||
version.version_minor = dev->driver->minor;
|
||||
version.version_patchlevel = dev->driver->patchlevel;
|
||||
DRM_COPY(version.name, dev->driver->name);
|
||||
DRM_COPY(version.date, dev->driver->date);
|
||||
DRM_COPY(version.desc, dev->driver->desc);
|
||||
|
||||
if (copy_to_user(argp, &version, sizeof(version)))
|
||||
return -EFAULT;
|
||||
|
@ -493,8 +496,9 @@ int drm_ioctl(struct inode *inode, struct file *filp,
|
|||
if (!func) {
|
||||
DRM_DEBUG("no function\n");
|
||||
retcode = -EINVAL;
|
||||
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) ||
|
||||
(ioctl->auth_needed && !priv->authenticated)) {
|
||||
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
|
||||
((ioctl->flags & DRM_AUTH) && !priv->authenticated) ||
|
||||
((ioctl->flags & DRM_MASTER) && !priv->master)) {
|
||||
retcode = -EACCES;
|
||||
} else {
|
||||
retcode = func(inode, filp, cmd, arg);
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm_sarea.h"
|
||||
#include <linux/poll.h>
|
||||
|
||||
static int drm_open_helper(struct inode *inode, struct file *filp,
|
||||
|
@ -42,15 +43,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
|
|||
|
||||
static int drm_setup(drm_device_t * dev)
|
||||
{
|
||||
drm_local_map_t *map;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
if (dev->driver->presetup) {
|
||||
ret = dev->driver->presetup(dev);
|
||||
if (dev->driver->firstopen) {
|
||||
ret = dev->driver->firstopen(dev);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* prebuild the SAREA */
|
||||
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
|
||||
if (i != 0)
|
||||
return i;
|
||||
|
||||
atomic_set(&dev->ioctl_count, 0);
|
||||
atomic_set(&dev->vma_count, 0);
|
||||
dev->buf_use = 0;
|
||||
|
@ -109,8 +116,6 @@ static int drm_setup(drm_device_t * dev)
|
|||
* drm_select_queue fails between the time the interrupt is
|
||||
* initialized and the time the queues are initialized.
|
||||
*/
|
||||
if (dev->driver->postsetup)
|
||||
dev->driver->postsetup(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -154,9 +159,167 @@ int drm_open(struct inode *inode, struct file *filp)
|
|||
|
||||
return retcode;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_open);
|
||||
|
||||
/**
|
||||
* File \c open operation.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
*
|
||||
* Puts the dev->fops corresponding to the device minor number into
|
||||
* \p filp, call the \c open method, and restore the file operations.
|
||||
*/
|
||||
int drm_stub_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
drm_device_t *dev = NULL;
|
||||
int minor = iminor(inode);
|
||||
int err = -ENODEV;
|
||||
struct file_operations *old_fops;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (!((minor >= 0) && (minor < drm_cards_limit)))
|
||||
return -ENODEV;
|
||||
|
||||
if (!drm_heads[minor])
|
||||
return -ENODEV;
|
||||
|
||||
if (!(dev = drm_heads[minor]->dev))
|
||||
return -ENODEV;
|
||||
|
||||
old_fops = filp->f_op;
|
||||
filp->f_op = fops_get(&dev->driver->fops);
|
||||
if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
|
||||
fops_put(filp->f_op);
|
||||
filp->f_op = fops_get(old_fops);
|
||||
}
|
||||
fops_put(old_fops);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether DRI will run on this CPU.
|
||||
*
|
||||
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
|
||||
*/
|
||||
static int drm_cpu_valid(void)
|
||||
{
|
||||
#if defined(__i386__)
|
||||
if (boot_cpu_data.x86 == 3)
|
||||
return 0; /* No cmpxchg on a 386 */
|
||||
#endif
|
||||
#if defined(__sparc__) && !defined(__sparc_v9__)
|
||||
return 0; /* No cmpxchg before v9 sparc. */
|
||||
#endif
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called whenever a process opens /dev/drm.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
* \param dev device.
|
||||
* \return zero on success or a negative number on failure.
|
||||
*
|
||||
* Creates and initializes a drm_file structure for the file private data in \p
|
||||
* filp and add it into the double linked list in \p dev.
|
||||
*/
|
||||
static int drm_open_helper(struct inode *inode, struct file *filp,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
int minor = iminor(inode);
|
||||
drm_file_t *priv;
|
||||
int ret;
|
||||
|
||||
if (filp->f_flags & O_EXCL)
|
||||
return -EBUSY; /* No exclusive opens */
|
||||
if (!drm_cpu_valid())
|
||||
return -EINVAL;
|
||||
|
||||
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
|
||||
|
||||
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(priv, 0, sizeof(*priv));
|
||||
filp->private_data = priv;
|
||||
priv->uid = current->euid;
|
||||
priv->pid = current->pid;
|
||||
priv->minor = minor;
|
||||
priv->head = drm_heads[minor];
|
||||
priv->ioctl_count = 0;
|
||||
/* for compatibility root is always authenticated */
|
||||
priv->authenticated = capable(CAP_SYS_ADMIN);
|
||||
priv->lock_count = 0;
|
||||
|
||||
if (dev->driver->open) {
|
||||
ret = dev->driver->open(dev, priv);
|
||||
if (ret < 0)
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if (!dev->file_last) {
|
||||
priv->next = NULL;
|
||||
priv->prev = NULL;
|
||||
dev->file_first = priv;
|
||||
dev->file_last = priv;
|
||||
/* first opener automatically becomes master */
|
||||
priv->master = 1;
|
||||
} else {
|
||||
priv->next = NULL;
|
||||
priv->prev = dev->file_last;
|
||||
dev->file_last->next = priv;
|
||||
dev->file_last = priv;
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
|
||||
#ifdef __alpha__
|
||||
/*
|
||||
* Default the hose
|
||||
*/
|
||||
if (!dev->hose) {
|
||||
struct pci_dev *pci_dev;
|
||||
pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
|
||||
if (pci_dev) {
|
||||
dev->hose = pci_dev->sysdata;
|
||||
pci_dev_put(pci_dev);
|
||||
}
|
||||
if (!dev->hose) {
|
||||
struct pci_bus *b = pci_bus_b(pci_root_buses.next);
|
||||
if (b)
|
||||
dev->hose = b->sysdata;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
out_free:
|
||||
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
|
||||
filp->private_data = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/** No-op. */
|
||||
int drm_fasync(int fd, struct file *filp, int on)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
int retcode;
|
||||
|
||||
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
|
||||
(long)old_encode_dev(priv->head->device));
|
||||
retcode = fasync_helper(fd, filp, on, &dev->buf_async);
|
||||
if (retcode < 0)
|
||||
return retcode;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fasync);
|
||||
|
||||
/**
|
||||
* Release file.
|
||||
*
|
||||
|
@ -167,7 +330,7 @@ EXPORT_SYMBOL(drm_open);
|
|||
* If the hardware lock is held then free it, and take it again for the kernel
|
||||
* context since it's necessary to reclaim buffers. Unlink the file private
|
||||
* data from its list and free it. Decreases the open count and if it reaches
|
||||
* zero calls takedown().
|
||||
* zero calls drm_lastclose().
|
||||
*/
|
||||
int drm_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
|
@ -180,8 +343,8 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
|
||||
DRM_DEBUG("open_count = %d\n", dev->open_count);
|
||||
|
||||
if (dev->driver->prerelease)
|
||||
dev->driver->prerelease(dev, filp);
|
||||
if (dev->driver->preclose)
|
||||
dev->driver->preclose(dev, filp);
|
||||
|
||||
/* ========================================================
|
||||
* Begin inline drm_release
|
||||
|
@ -197,8 +360,8 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
DRM_DEBUG("File %p released, freeing lock for context %d\n",
|
||||
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
|
||||
|
||||
if (dev->driver->release)
|
||||
dev->driver->release(dev, filp);
|
||||
if (dev->driver->reclaim_buffers_locked)
|
||||
dev->driver->reclaim_buffers_locked(dev, filp);
|
||||
|
||||
drm_lock_free(dev, &dev->lock.hw_lock->lock,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
|
||||
|
@ -207,7 +370,7 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
hardware at this point, possibly
|
||||
processed via a callback to the X
|
||||
server. */
|
||||
} else if (dev->driver->release && priv->lock_count
|
||||
} else if (dev->driver->reclaim_buffers_locked && priv->lock_count
|
||||
&& dev->lock.hw_lock) {
|
||||
/* The lock is required to reclaim buffers */
|
||||
DECLARE_WAITQUEUE(entry, current);
|
||||
|
@ -237,15 +400,14 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
__set_current_state(TASK_RUNNING);
|
||||
remove_wait_queue(&dev->lock.lock_queue, &entry);
|
||||
if (!retcode) {
|
||||
if (dev->driver->release)
|
||||
dev->driver->release(dev, filp);
|
||||
dev->driver->reclaim_buffers_locked(dev, filp);
|
||||
drm_lock_free(dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT);
|
||||
}
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)
|
||||
&& !dev->driver->release) {
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
|
||||
!dev->driver->reclaim_buffers_locked) {
|
||||
dev->driver->reclaim_buffers(dev, filp);
|
||||
}
|
||||
|
||||
|
@ -292,9 +454,8 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
}
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (dev->driver->free_filp_priv)
|
||||
dev->driver->free_filp_priv(dev, priv);
|
||||
|
||||
if (dev->driver->postclose)
|
||||
dev->driver->postclose(dev, priv);
|
||||
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
|
||||
|
||||
/* ========================================================
|
||||
|
@ -313,7 +474,7 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
}
|
||||
spin_unlock(&dev->count_lock);
|
||||
unlock_kernel();
|
||||
return drm_takedown(dev);
|
||||
return drm_lastclose(dev);
|
||||
}
|
||||
spin_unlock(&dev->count_lock);
|
||||
|
||||
|
@ -321,129 +482,11 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||
|
||||
return retcode;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_release);
|
||||
|
||||
/**
|
||||
* Called whenever a process opens /dev/drm.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
* \param dev device.
|
||||
* \return zero on success or a negative number on failure.
|
||||
*
|
||||
* Creates and initializes a drm_file structure for the file private data in \p
|
||||
* filp and add it into the double linked list in \p dev.
|
||||
*/
|
||||
static int drm_open_helper(struct inode *inode, struct file *filp,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
int minor = iminor(inode);
|
||||
drm_file_t *priv;
|
||||
int ret;
|
||||
|
||||
if (filp->f_flags & O_EXCL)
|
||||
return -EBUSY; /* No exclusive opens */
|
||||
if (!drm_cpu_valid())
|
||||
return -EINVAL;
|
||||
|
||||
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
|
||||
|
||||
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(priv, 0, sizeof(*priv));
|
||||
filp->private_data = priv;
|
||||
priv->uid = current->euid;
|
||||
priv->pid = current->pid;
|
||||
priv->minor = minor;
|
||||
priv->head = drm_heads[minor];
|
||||
priv->ioctl_count = 0;
|
||||
priv->authenticated = capable(CAP_SYS_ADMIN);
|
||||
priv->lock_count = 0;
|
||||
|
||||
if (dev->driver->open_helper) {
|
||||
ret = dev->driver->open_helper(dev, priv);
|
||||
if (ret < 0)
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if (!dev->file_last) {
|
||||
priv->next = NULL;
|
||||
priv->prev = NULL;
|
||||
dev->file_first = priv;
|
||||
dev->file_last = priv;
|
||||
} else {
|
||||
priv->next = NULL;
|
||||
priv->prev = dev->file_last;
|
||||
dev->file_last->next = priv;
|
||||
dev->file_last = priv;
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
|
||||
#ifdef __alpha__
|
||||
/*
|
||||
* Default the hose
|
||||
*/
|
||||
if (!dev->hose) {
|
||||
struct pci_dev *pci_dev;
|
||||
pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
|
||||
if (pci_dev) {
|
||||
dev->hose = pci_dev->sysdata;
|
||||
pci_dev_put(pci_dev);
|
||||
}
|
||||
if (!dev->hose) {
|
||||
struct pci_bus *b = pci_bus_b(pci_root_buses.next);
|
||||
if (b)
|
||||
dev->hose = b->sysdata;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
out_free:
|
||||
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
|
||||
filp->private_data = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/** No-op. */
|
||||
int drm_flush(struct file *filp)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
|
||||
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
|
||||
current->pid, (long)old_encode_dev(priv->head->device),
|
||||
dev->open_count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_flush);
|
||||
|
||||
/** No-op. */
|
||||
int drm_fasync(int fd, struct file *filp, int on)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
int retcode;
|
||||
|
||||
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
|
||||
(long)old_encode_dev(priv->head->device));
|
||||
retcode = fasync_helper(fd, filp, on, &dev->buf_async);
|
||||
if (retcode < 0)
|
||||
return retcode;
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_fasync);
|
||||
|
||||
/** No-op. */
|
||||
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_poll);
|
||||
|
|
|
@ -1,53 +0,0 @@
|
|||
/**
|
||||
* \file drm_init.c
|
||||
* Setup/Cleanup for DRM
|
||||
*
|
||||
* \author Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* \author Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
/*
|
||||
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
|
||||
/**
|
||||
* Check whether DRI will run on this CPU.
|
||||
*
|
||||
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
|
||||
*/
|
||||
int drm_cpu_valid(void)
|
||||
{
|
||||
#if defined(__i386__)
|
||||
if (boot_cpu_data.x86 == 3)
|
||||
return 0; /* No cmpxchg on a 386 */
|
||||
#endif
|
||||
#if defined(__sparc__) && !defined(__sparc_v9__)
|
||||
return 0; /* No cmpxchg before v9 sparc. */
|
||||
#endif
|
||||
return 1;
|
||||
}
|
|
@ -137,17 +137,22 @@ int drm_setunique(struct inode *inode, struct file *filp,
|
|||
|
||||
static int drm_set_busid(drm_device_t * dev)
|
||||
{
|
||||
int len;
|
||||
|
||||
if (dev->unique != NULL)
|
||||
return EBUSY;
|
||||
|
||||
dev->unique_len = 20;
|
||||
dev->unique_len = 40;
|
||||
dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
|
||||
if (dev->unique == NULL)
|
||||
return ENOMEM;
|
||||
|
||||
snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
|
||||
len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
|
||||
dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
|
||||
|
||||
if (len > dev->unique_len)
|
||||
DRM_ERROR("Unique buffer overflowed\n");
|
||||
|
||||
dev->devname =
|
||||
drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
|
||||
2, DRM_MEM_DRIVER);
|
||||
|
@ -239,7 +244,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
|
|||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
drm_client_t __user *argp = (void __user *)arg;
|
||||
drm_client_t __user *argp = (drm_client_t __user *)arg;
|
||||
drm_client_t client;
|
||||
drm_file_t *pt;
|
||||
int idx;
|
||||
|
@ -262,7 +267,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
|
|||
client.iocs = pt->ioctl_count;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (copy_to_user((drm_client_t __user *) arg, &client, sizeof(client)))
|
||||
if (copy_to_user(argp, &client, sizeof(client)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
@ -325,17 +330,13 @@ int drm_setversion(DRM_IOCTL_ARGS)
|
|||
drm_set_version_t retv;
|
||||
int if_version;
|
||||
drm_set_version_t __user *argp = (void __user *)data;
|
||||
drm_version_t version;
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
|
||||
|
||||
memset(&version, 0, sizeof(version));
|
||||
|
||||
dev->driver->version(&version);
|
||||
retv.drm_di_major = DRM_IF_MAJOR;
|
||||
retv.drm_di_minor = DRM_IF_MINOR;
|
||||
retv.drm_dd_major = version.version_major;
|
||||
retv.drm_dd_minor = version.version_minor;
|
||||
retv.drm_dd_major = dev->driver->major;
|
||||
retv.drm_dd_minor = dev->driver->minor;
|
||||
|
||||
DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
|
||||
|
||||
|
@ -343,7 +344,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
|
|||
if (sv.drm_di_major != DRM_IF_MAJOR ||
|
||||
sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
|
||||
return EINVAL;
|
||||
if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor);
|
||||
if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
|
||||
dev->if_version = DRM_MAX(if_version, dev->if_version);
|
||||
if (sv.drm_di_minor >= 1) {
|
||||
/*
|
||||
|
@ -354,9 +355,9 @@ int drm_setversion(DRM_IOCTL_ARGS)
|
|||
}
|
||||
|
||||
if (sv.drm_dd_major != -1) {
|
||||
if (sv.drm_dd_major != version.version_major ||
|
||||
if (sv.drm_dd_major != dev->driver->major ||
|
||||
sv.drm_dd_minor < 0
|
||||
|| sv.drm_dd_minor > version.version_minor)
|
||||
|| sv.drm_dd_minor > dev->driver->minor)
|
||||
return EINVAL;
|
||||
|
||||
if (dev->driver->set_version)
|
||||
|
|
|
@ -130,7 +130,6 @@ int drm_lock(struct inode *inode, struct file *filp,
|
|||
/* dev->driver->kernel_context_switch isn't used by any of the x86
|
||||
* drivers but is used by the Sparc driver.
|
||||
*/
|
||||
|
||||
if (dev->driver->kernel_context_switch &&
|
||||
dev->last_context != lock.context) {
|
||||
dev->driver->kernel_context_switch(dev, dev->last_context,
|
||||
|
|
|
@ -145,30 +145,22 @@ DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
|
|||
return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_alloc_agp);
|
||||
|
||||
/** Wrapper around agp_free_memory() */
|
||||
int drm_free_agp(DRM_AGP_MEM * handle, int pages)
|
||||
{
|
||||
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_free_agp);
|
||||
|
||||
/** Wrapper around agp_bind_memory() */
|
||||
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
|
||||
{
|
||||
return drm_agp_bind_memory(handle, start);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_bind_agp);
|
||||
|
||||
/** Wrapper around agp_unbind_memory() */
|
||||
int drm_unbind_agp(DRM_AGP_MEM * handle)
|
||||
{
|
||||
return drm_agp_unbind_memory(handle);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(drm_unbind_agp);
|
||||
#endif /* agp */
|
||||
#endif /* debug_memory */
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* \file drm_memory.h
|
||||
* \file drm_memory_debug.h
|
||||
* Memory management wrappers for DRM.
|
||||
*
|
||||
* \author Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
|
@ -43,42 +43,41 @@ typedef struct drm_mem_stats {
|
|||
unsigned long bytes_freed;
|
||||
} drm_mem_stats_t;
|
||||
|
||||
static DEFINE_SPINLOCK(DRM(mem_lock));
|
||||
static unsigned long DRM(ram_available) = 0; /* In pages */
|
||||
static unsigned long DRM(ram_used) = 0;
|
||||
static drm_mem_stats_t DRM(mem_stats)[] =
|
||||
static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
|
||||
static unsigned long drm_ram_available = 0; /* In pages */
|
||||
static unsigned long drm_ram_used = 0;
|
||||
static drm_mem_stats_t drm_mem_stats[] =
|
||||
{
|
||||
[DRM_MEM_DMA] = {
|
||||
"dmabufs"},[DRM_MEM_SAREA] = {
|
||||
"sareas"},[DRM_MEM_DRIVER] = {
|
||||
"driver"},[DRM_MEM_MAGIC] = {
|
||||
"magic"},[DRM_MEM_IOCTLS] = {
|
||||
"ioctltab"},[DRM_MEM_MAPS] = {
|
||||
"maplist"},[DRM_MEM_VMAS] = {
|
||||
"vmalist"},[DRM_MEM_BUFS] = {
|
||||
"buflist"},[DRM_MEM_SEGS] = {
|
||||
"seglist"},[DRM_MEM_PAGES] = {
|
||||
"pagelist"},[DRM_MEM_FILES] = {
|
||||
"files"},[DRM_MEM_QUEUES] = {
|
||||
"queues"},[DRM_MEM_CMDS] = {
|
||||
"commands"},[DRM_MEM_MAPPINGS] = {
|
||||
"mappings"},[DRM_MEM_BUFLISTS] = {
|
||||
"buflists"},[DRM_MEM_AGPLISTS] = {
|
||||
"agplist"},[DRM_MEM_SGLISTS] = {
|
||||
"sglist"},[DRM_MEM_TOTALAGP] = {
|
||||
"totalagp"},[DRM_MEM_BOUNDAGP] = {
|
||||
"boundagp"},[DRM_MEM_CTXBITMAP] = {
|
||||
"ctxbitmap"},[DRM_MEM_CTXLIST] = {
|
||||
"ctxlist"},[DRM_MEM_STUB] = {
|
||||
"stub"}, {
|
||||
NULL, 0,} /* Last entry must be null */
|
||||
[DRM_MEM_DMA] = {"dmabufs"},
|
||||
[DRM_MEM_SAREA] = {"sareas"},
|
||||
[DRM_MEM_DRIVER] = {"driver"},
|
||||
[DRM_MEM_MAGIC] = {"magic"},
|
||||
[DRM_MEM_IOCTLS] = {"ioctltab"},
|
||||
[DRM_MEM_MAPS] = {"maplist"},
|
||||
[DRM_MEM_VMAS] = {"vmalist"},
|
||||
[DRM_MEM_BUFS] = {"buflist"},
|
||||
[DRM_MEM_SEGS] = {"seglist"},
|
||||
[DRM_MEM_PAGES] = {"pagelist"},
|
||||
[DRM_MEM_FILES] = {"files"},
|
||||
[DRM_MEM_QUEUES] = {"queues"},
|
||||
[DRM_MEM_CMDS] = {"commands"},
|
||||
[DRM_MEM_MAPPINGS] = {"mappings"},
|
||||
[DRM_MEM_BUFLISTS] = {"buflists"},
|
||||
[DRM_MEM_AGPLISTS] = {"agplist"},
|
||||
[DRM_MEM_SGLISTS] = {"sglist"},
|
||||
[DRM_MEM_TOTALAGP] = {"totalagp"},
|
||||
[DRM_MEM_BOUNDAGP] = {"boundagp"},
|
||||
[DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
|
||||
[DRM_MEM_CTXLIST] = {"ctxlist"},
|
||||
[DRM_MEM_STUB] = {"stub"},
|
||||
{NULL, 0,} /* Last entry must be null */
|
||||
};
|
||||
|
||||
void DRM(mem_init) (void) {
|
||||
void drm_mem_init (void) {
|
||||
drm_mem_stats_t *mem;
|
||||
struct sysinfo si;
|
||||
|
||||
for (mem = DRM(mem_stats); mem->name; ++mem) {
|
||||
for (mem = drm_mem_stats; mem->name; ++mem) {
|
||||
mem->succeed_count = 0;
|
||||
mem->free_count = 0;
|
||||
mem->fail_count = 0;
|
||||
|
@ -87,13 +86,13 @@ void DRM(mem_init) (void) {
|
|||
}
|
||||
|
||||
si_meminfo(&si);
|
||||
DRM(ram_available) = si.totalram;
|
||||
DRM(ram_used) = 0;
|
||||
drm_ram_available = si.totalram;
|
||||
drm_ram_used = 0;
|
||||
}
|
||||
|
||||
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
|
||||
|
||||
static int DRM(_mem_info) (char *buf, char **start, off_t offset,
|
||||
static int drm__mem_info (char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data) {
|
||||
drm_mem_stats_t *pt;
|
||||
int len = 0;
|
||||
|
@ -112,11 +111,11 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
|
|||
" | allocs bytes\n\n");
|
||||
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
|
||||
"system", 0, 0, 0,
|
||||
DRM(ram_available) << (PAGE_SHIFT - 10));
|
||||
drm_ram_available << (PAGE_SHIFT - 10));
|
||||
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
|
||||
"locked", 0, 0, 0, DRM(ram_used) >> 10);
|
||||
"locked", 0, 0, 0, drm_ram_used >> 10);
|
||||
DRM_PROC_PRINT("\n");
|
||||
for (pt = DRM(mem_stats); pt->name; pt++) {
|
||||
for (pt = drm_mem_stats; pt->name; pt++) {
|
||||
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
|
||||
pt->name,
|
||||
pt->succeed_count,
|
||||
|
@ -135,17 +134,17 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
|
|||
return len - offset;
|
||||
}
|
||||
|
||||
int DRM(mem_info) (char *buf, char **start, off_t offset,
|
||||
int drm_mem_info (char *buf, char **start, off_t offset,
|
||||
int len, int *eof, void *data) {
|
||||
int ret;
|
||||
|
||||
spin_lock(&DRM(mem_lock));
|
||||
ret = DRM(_mem_info) (buf, start, offset, len, eof, data);
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
ret = drm__mem_info (buf, start, offset, len, eof, data);
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void *DRM(alloc) (size_t size, int area) {
|
||||
void *drm_alloc (size_t size, int area) {
|
||||
void *pt;
|
||||
|
||||
if (!size) {
|
||||
|
@ -154,41 +153,41 @@ void *DRM(alloc) (size_t size, int area) {
|
|||
}
|
||||
|
||||
if (!(pt = kmalloc(size, GFP_KERNEL))) {
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[area].fail_count;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[area].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return NULL;
|
||||
}
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[area].succeed_count;
|
||||
DRM(mem_stats)[area].bytes_allocated += size;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[area].succeed_count;
|
||||
drm_mem_stats[area].bytes_allocated += size;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return pt;
|
||||
}
|
||||
|
||||
void *DRM(calloc) (size_t nmemb, size_t size, int area) {
|
||||
void *drm_calloc (size_t nmemb, size_t size, int area) {
|
||||
void *addr;
|
||||
|
||||
addr = DRM(alloc) (nmemb * size, area);
|
||||
addr = drm_alloc (nmemb * size, area);
|
||||
if (addr != NULL)
|
||||
memset((void *)addr, 0, size * nmemb);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) {
|
||||
void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
|
||||
void *pt;
|
||||
|
||||
if (!(pt = DRM(alloc) (size, area)))
|
||||
if (!(pt = drm_alloc (size, area)))
|
||||
return NULL;
|
||||
if (oldpt && oldsize) {
|
||||
memcpy(pt, oldpt, oldsize);
|
||||
DRM(free) (oldpt, oldsize, area);
|
||||
drm_free (oldpt, oldsize, area);
|
||||
}
|
||||
return pt;
|
||||
}
|
||||
|
||||
void DRM(free) (void *pt, size_t size, int area) {
|
||||
void drm_free (void *pt, size_t size, int area) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
|
||||
|
@ -196,43 +195,43 @@ void DRM(free) (void *pt, size_t size, int area) {
|
|||
DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
|
||||
else
|
||||
kfree(pt);
|
||||
spin_lock(&DRM(mem_lock));
|
||||
DRM(mem_stats)[area].bytes_freed += size;
|
||||
free_count = ++DRM(mem_stats)[area].free_count;
|
||||
alloc_count = DRM(mem_stats)[area].succeed_count;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
drm_mem_stats[area].bytes_freed += size;
|
||||
free_count = ++drm_mem_stats[area].free_count;
|
||||
alloc_count = drm_mem_stats[area].succeed_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
if (free_count > alloc_count) {
|
||||
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
|
||||
free_count, alloc_count);
|
||||
}
|
||||
}
|
||||
|
||||
unsigned long DRM(alloc_pages) (int order, int area) {
|
||||
unsigned long drm_alloc_pages (int order, int area) {
|
||||
unsigned long address;
|
||||
unsigned long bytes = PAGE_SIZE << order;
|
||||
unsigned long addr;
|
||||
unsigned int sz;
|
||||
|
||||
spin_lock(&DRM(mem_lock));
|
||||
if ((DRM(ram_used) >> PAGE_SHIFT)
|
||||
> (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
if ((drm_ram_used >> PAGE_SHIFT)
|
||||
> (DRM_RAM_PERCENT * drm_ram_available) / 100) {
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return 0;
|
||||
}
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_unlock(&drm_mem_lock);
|
||||
|
||||
address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
|
||||
if (!address) {
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[area].fail_count;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[area].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return 0;
|
||||
}
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[area].succeed_count;
|
||||
DRM(mem_stats)[area].bytes_allocated += bytes;
|
||||
DRM(ram_used) += bytes;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[area].succeed_count;
|
||||
drm_mem_stats[area].bytes_allocated += bytes;
|
||||
drm_ram_used += bytes;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
|
||||
/* Zero outside the lock */
|
||||
memset((void *)address, 0, bytes);
|
||||
|
@ -246,7 +245,7 @@ unsigned long DRM(alloc_pages) (int order, int area) {
|
|||
return address;
|
||||
}
|
||||
|
||||
void DRM(free_pages) (unsigned long address, int order, int area) {
|
||||
void drm_free_pages (unsigned long address, int order, int area) {
|
||||
unsigned long bytes = PAGE_SIZE << order;
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
|
@ -264,12 +263,12 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
|
|||
free_pages(address, order);
|
||||
}
|
||||
|
||||
spin_lock(&DRM(mem_lock));
|
||||
free_count = ++DRM(mem_stats)[area].free_count;
|
||||
alloc_count = DRM(mem_stats)[area].succeed_count;
|
||||
DRM(mem_stats)[area].bytes_freed += bytes;
|
||||
DRM(ram_used) -= bytes;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
free_count = ++drm_mem_stats[area].free_count;
|
||||
alloc_count = drm_mem_stats[area].succeed_count;
|
||||
drm_mem_stats[area].bytes_freed += bytes;
|
||||
drm_ram_used -= bytes;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
if (free_count > alloc_count) {
|
||||
DRM_MEM_ERROR(area,
|
||||
"Excess frees: %d frees, %d allocs\n",
|
||||
|
@ -277,7 +276,7 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
|
|||
}
|
||||
}
|
||||
|
||||
void *DRM(ioremap) (unsigned long offset, unsigned long size,
|
||||
void *drm_ioremap (unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev) {
|
||||
void *pt;
|
||||
|
||||
|
@ -288,19 +287,19 @@ void *DRM(ioremap) (unsigned long offset, unsigned long size,
|
|||
}
|
||||
|
||||
if (!(pt = drm_ioremap(offset, size, dev))) {
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return NULL;
|
||||
}
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
|
||||
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return pt;
|
||||
}
|
||||
|
||||
void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
|
||||
void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev) {
|
||||
void *pt;
|
||||
|
||||
|
@ -311,19 +310,19 @@ void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
|
|||
}
|
||||
|
||||
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return NULL;
|
||||
}
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
|
||||
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return pt;
|
||||
}
|
||||
|
||||
void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
|
||||
void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
|
||||
|
@ -333,11 +332,11 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
|
|||
else
|
||||
drm_ioremapfree(pt, size, dev);
|
||||
|
||||
spin_lock(&DRM(mem_lock));
|
||||
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
|
||||
free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
|
||||
alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
|
||||
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
|
||||
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
if (free_count > alloc_count) {
|
||||
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
|
||||
"Excess frees: %d frees, %d allocs\n",
|
||||
|
@ -347,7 +346,7 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
|
|||
|
||||
#if __OS_HAS_AGP
|
||||
|
||||
DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
|
||||
DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
|
||||
DRM_AGP_MEM *handle;
|
||||
|
||||
if (!pages) {
|
||||
|
@ -355,21 +354,21 @@ DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if ((handle = DRM(agp_allocate_memory) (pages, type))) {
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
|
||||
DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
|
||||
if ((handle = drm_agp_allocate_memory (pages, type))) {
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
|
||||
+= pages << PAGE_SHIFT;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return handle;
|
||||
}
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
|
||||
int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
int retval = -EINVAL;
|
||||
|
@ -380,13 +379,13 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
|
|||
return retval;
|
||||
}
|
||||
|
||||
if (DRM(agp_free_memory) (handle)) {
|
||||
spin_lock(&DRM(mem_lock));
|
||||
free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
|
||||
alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
|
||||
DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
|
||||
if (drm_agp_free_memory (handle)) {
|
||||
spin_lock(&drm_mem_lock);
|
||||
free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
|
||||
alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
|
||||
+= pages << PAGE_SHIFT;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_unlock(&drm_mem_lock);
|
||||
if (free_count > alloc_count) {
|
||||
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
|
||||
"Excess frees: %d frees, %d allocs\n",
|
||||
|
@ -397,7 +396,7 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
|
|||
return retval;
|
||||
}
|
||||
|
||||
int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
|
||||
int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
|
||||
int retcode = -EINVAL;
|
||||
|
||||
if (!handle) {
|
||||
|
@ -406,21 +405,21 @@ int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
|
|||
return retcode;
|
||||
}
|
||||
|
||||
if (!(retcode = DRM(agp_bind_memory) (handle, start))) {
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
|
||||
DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
|
||||
if (!(retcode = drm_agp_bind_memory (handle, start))) {
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
|
||||
+= handle->page_count << PAGE_SHIFT;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return retcode;
|
||||
}
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
return retcode;
|
||||
}
|
||||
|
||||
int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
|
||||
int drm_unbind_agp (DRM_AGP_MEM * handle) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
int retcode = -EINVAL;
|
||||
|
@ -431,14 +430,14 @@ int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
|
|||
return retcode;
|
||||
}
|
||||
|
||||
if ((retcode = DRM(agp_unbind_memory) (handle)))
|
||||
if ((retcode = drm_agp_unbind_memory (handle)))
|
||||
return retcode;
|
||||
spin_lock(&DRM(mem_lock));
|
||||
free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
|
||||
alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
|
||||
DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
|
||||
spin_lock(&drm_mem_lock);
|
||||
free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
|
||||
alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
|
||||
drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
|
||||
+= handle->page_count << PAGE_SHIFT;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
spin_unlock(&drm_mem_lock);
|
||||
if (free_count > alloc_count) {
|
||||
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
|
||||
"Excess frees: %d frees, %d allocs\n",
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#define DRM_ERR(d) -(d)
|
||||
/** Current process ID */
|
||||
#define DRM_CURRENTPID current->pid
|
||||
#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
|
||||
#define DRM_UDELAY(d) udelay(d)
|
||||
/** Read a byte from a MMIO region */
|
||||
#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
|
||||
|
|
|
@ -46,6 +46,7 @@
|
|||
{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
|
||||
{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
|
||||
{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
|
||||
{0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
|
||||
{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
|
||||
{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
|
||||
{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
|
||||
|
@ -69,6 +70,7 @@
|
|||
{0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
|
||||
{0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
|
||||
{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
|
||||
{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
|
||||
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
|
||||
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
|
||||
{0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
|
||||
|
@ -82,10 +84,13 @@
|
|||
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
|
||||
{0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
|
||||
{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
|
||||
{0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
|
||||
{0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
|
||||
{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
|
||||
{0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
|
||||
{0, 0, 0}
|
||||
|
||||
#define r128_PCI_IDS \
|
||||
|
@ -176,7 +181,7 @@
|
|||
|
||||
#define viadrv_PCI_IDS \
|
||||
{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
|
||||
{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
|
@ -196,6 +201,10 @@
|
|||
{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0, 0, 0}
|
||||
|
||||
#define gamma_PCI_IDS \
|
||||
{0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0, 0, 0}
|
||||
|
||||
#define savage_PCI_IDS \
|
||||
{0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
|
||||
{0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
|
||||
|
@ -234,3 +243,4 @@
|
|||
{0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0, 0, 0}
|
||||
|
||||
|
|
|
@ -61,16 +61,14 @@ static struct drm_proc_list {
|
|||
const char *name; /**< file name */
|
||||
int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
|
||||
} drm_proc_list[] = {
|
||||
{
|
||||
"name", drm_name_info}, {
|
||||
"mem", drm_mem_info}, {
|
||||
"vm", drm_vm_info}, {
|
||||
"clients", drm_clients_info}, {
|
||||
"queues", drm_queues_info}, {
|
||||
"bufs", drm_bufs_info},
|
||||
{"name", drm_name_info},
|
||||
{"mem", drm_mem_info},
|
||||
{"vm", drm_vm_info},
|
||||
{"clients", drm_clients_info},
|
||||
{"queues", drm_queues_info},
|
||||
{"bufs", drm_bufs_info},
|
||||
#if DRM_DEBUG_CODE
|
||||
{
|
||||
"vma", drm_vma_info},
|
||||
{"vma", drm_vma_info},
|
||||
#endif
|
||||
};
|
||||
|
||||
|
|
|
@ -93,8 +93,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
|
|||
|
||||
dev->driver = driver;
|
||||
|
||||
if (dev->driver->preinit)
|
||||
if ((retcode = dev->driver->preinit(dev, ent->driver_data)))
|
||||
if (dev->driver->load)
|
||||
if ((retcode = dev->driver->load(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
|
||||
if (drm_core_has_AGP(dev)) {
|
||||
|
@ -124,47 +124,10 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
|
|||
return 0;
|
||||
|
||||
error_out_unreg:
|
||||
drm_takedown(dev);
|
||||
drm_lastclose(dev);
|
||||
return retcode;
|
||||
}
|
||||
|
||||
/**
|
||||
* File \c open operation.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
*
|
||||
* Puts the dev->fops corresponding to the device minor number into
|
||||
* \p filp, call the \c open method, and restore the file operations.
|
||||
*/
|
||||
int drm_stub_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
drm_device_t *dev = NULL;
|
||||
int minor = iminor(inode);
|
||||
int err = -ENODEV;
|
||||
struct file_operations *old_fops;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (!((minor >= 0) && (minor < drm_cards_limit)))
|
||||
return -ENODEV;
|
||||
|
||||
if (!drm_heads[minor])
|
||||
return -ENODEV;
|
||||
|
||||
if (!(dev = drm_heads[minor]->dev))
|
||||
return -ENODEV;
|
||||
|
||||
old_fops = filp->f_op;
|
||||
filp->f_op = fops_get(&dev->driver->fops);
|
||||
if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
|
||||
fops_put(filp->f_op);
|
||||
filp->f_op = fops_get(old_fops);
|
||||
}
|
||||
fops_put(old_fops);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a secondary minor number.
|
||||
|
@ -200,11 +163,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
|
|||
goto err_g1;
|
||||
}
|
||||
|
||||
head->dev_class = drm_sysfs_device_add(drm_class,
|
||||
MKDEV(DRM_MAJOR,
|
||||
minor),
|
||||
&dev->pdev->dev,
|
||||
"card%d", minor);
|
||||
head->dev_class = drm_sysfs_device_add(drm_class, head);
|
||||
if (IS_ERR(head->dev_class)) {
|
||||
printk(KERN_ERR
|
||||
"DRM: Error sysfs_device_add.\n");
|
||||
|
@ -258,11 +217,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
|
|||
}
|
||||
if ((ret = drm_get_head(dev, &dev->primary)))
|
||||
goto err_g1;
|
||||
|
||||
/* postinit is a required function to display the signon banner */
|
||||
/* drivers add secondary heads here if needed */
|
||||
if ((ret = dev->driver->postinit(dev, ent->driver_data)))
|
||||
goto err_g1;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
|
||||
driver->name, driver->major, driver->minor, driver->patchlevel,
|
||||
driver->date, dev->primary.minor);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -318,10 +276,9 @@ int drm_put_head(drm_head_t * head)
|
|||
DRM_DEBUG("release secondary minor %d\n", minor);
|
||||
|
||||
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
|
||||
drm_sysfs_device_remove(MKDEV(DRM_MAJOR, head->minor));
|
||||
drm_sysfs_device_remove(head->dev_class);
|
||||
|
||||
*head = (drm_head_t) {
|
||||
.dev = NULL};
|
||||
*head = (drm_head_t) {.dev = NULL};
|
||||
|
||||
drm_heads[minor] = NULL;
|
||||
|
||||
|
|
|
@ -15,8 +15,6 @@
|
|||
#include <linux/device.h>
|
||||
#include <linux/kdev_t.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "drm_core.h"
|
||||
#include "drmP.h"
|
||||
|
@ -28,15 +26,11 @@ struct drm_sysfs_class {
|
|||
#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class)
|
||||
|
||||
struct simple_dev {
|
||||
struct list_head node;
|
||||
dev_t dev;
|
||||
struct class_device class_dev;
|
||||
};
|
||||
#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev)
|
||||
|
||||
static LIST_HEAD(simple_dev_list);
|
||||
static DEFINE_SPINLOCK(simple_dev_list_lock);
|
||||
|
||||
static void release_simple_dev(struct class_device *class_dev)
|
||||
{
|
||||
struct simple_dev *s_dev = to_simple_dev(class_dev);
|
||||
|
@ -124,6 +118,18 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
|
|||
class_unregister(&cs->class);
|
||||
}
|
||||
|
||||
static ssize_t show_dri(struct class_device *class_device, char *buf)
|
||||
{
|
||||
drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev;
|
||||
if (dev->driver->dri_library_name)
|
||||
return dev->driver->dri_library_name(dev, buf);
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
|
||||
}
|
||||
|
||||
static struct class_device_attribute class_device_attrs[] = {
|
||||
__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
|
||||
};
|
||||
|
||||
/**
|
||||
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
|
||||
* @cs: pointer to the struct drm_sysfs_class that this device should be registered to.
|
||||
|
@ -138,13 +144,11 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
|
|||
* Note: the struct drm_sysfs_class passed to this function must have previously been
|
||||
* created with a call to drm_sysfs_create().
|
||||
*/
|
||||
struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
|
||||
struct device *device,
|
||||
const char *fmt, ...)
|
||||
struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
|
||||
drm_head_t *head)
|
||||
{
|
||||
va_list args;
|
||||
struct simple_dev *s_dev = NULL;
|
||||
int retval;
|
||||
int i, retval;
|
||||
|
||||
if ((cs == NULL) || (IS_ERR(cs))) {
|
||||
retval = -ENODEV;
|
||||
|
@ -158,26 +162,23 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
|
|||
}
|
||||
memset(s_dev, 0x00, sizeof(*s_dev));
|
||||
|
||||
s_dev->dev = dev;
|
||||
s_dev->class_dev.dev = device;
|
||||
s_dev->dev = MKDEV(DRM_MAJOR, head->minor);
|
||||
s_dev->class_dev.dev = &(head->dev->pdev)->dev;
|
||||
s_dev->class_dev.class = &cs->class;
|
||||
|
||||
va_start(args, fmt);
|
||||
vsnprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, fmt, args);
|
||||
va_end(args);
|
||||
snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor);
|
||||
retval = class_device_register(&s_dev->class_dev);
|
||||
if (retval)
|
||||
goto error;
|
||||
|
||||
class_device_create_file(&s_dev->class_dev, &cs->attr);
|
||||
class_set_devdata(&s_dev->class_dev, head);
|
||||
|
||||
spin_lock(&simple_dev_list_lock);
|
||||
list_add(&s_dev->node, &simple_dev_list);
|
||||
spin_unlock(&simple_dev_list_lock);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
|
||||
class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]);
|
||||
return &s_dev->class_dev;
|
||||
|
||||
error:
|
||||
error:
|
||||
kfree(s_dev);
|
||||
return ERR_PTR(retval);
|
||||
}
|
||||
|
@ -189,23 +190,12 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
|
|||
* This call unregisters and cleans up a class device that was created with a
|
||||
* call to drm_sysfs_device_add()
|
||||
*/
|
||||
void drm_sysfs_device_remove(dev_t dev)
|
||||
void drm_sysfs_device_remove(struct class_device *class_dev)
|
||||
{
|
||||
struct simple_dev *s_dev = NULL;
|
||||
int found = 0;
|
||||
struct simple_dev *s_dev = to_simple_dev(class_dev);
|
||||
int i;
|
||||
|
||||
spin_lock(&simple_dev_list_lock);
|
||||
list_for_each_entry(s_dev, &simple_dev_list, node) {
|
||||
if (s_dev->dev == dev) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (found) {
|
||||
list_del(&s_dev->node);
|
||||
spin_unlock(&simple_dev_list_lock);
|
||||
class_device_unregister(&s_dev->class_dev);
|
||||
} else {
|
||||
spin_unlock(&simple_dev_list_lock);
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
|
||||
class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]);
|
||||
class_device_unregister(&s_dev->class_dev);
|
||||
}
|
||||
|
|
|
@ -114,7 +114,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
|
|||
|
||||
static struct file_operations i810_buffer_fops = {
|
||||
.open = drm_open,
|
||||
.flush = drm_flush,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.mmap = i810_mmap_buffers,
|
||||
|
@ -1319,12 +1318,24 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
void i810_driver_pretakedown(drm_device_t * dev)
|
||||
int i810_driver_load(drm_device_t *dev, unsigned long flags)
|
||||
{
|
||||
/* i810 has 4 more counters */
|
||||
dev->counters += 4;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
dev->types[9] = _DRM_STAT_DMA;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i810_driver_lastclose(drm_device_t * dev)
|
||||
{
|
||||
i810_dma_cleanup(dev);
|
||||
}
|
||||
|
||||
void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
||||
void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
|
||||
{
|
||||
if (dev->dev_private) {
|
||||
drm_i810_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -1334,7 +1345,7 @@ void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
|||
}
|
||||
}
|
||||
|
||||
void i810_driver_release(drm_device_t * dev, struct file *filp)
|
||||
void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
|
||||
{
|
||||
i810_reclaim_buffers(dev, filp);
|
||||
}
|
||||
|
@ -1346,21 +1357,21 @@ int i810_driver_dma_quiescent(drm_device_t * dev)
|
|||
}
|
||||
|
||||
drm_ioctl_desc_t i810_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, 1, 0}
|
||||
[DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH}
|
||||
};
|
||||
|
||||
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
|
||||
|
|
|
@ -38,38 +38,6 @@
|
|||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
/* i810 has 4 more counters */
|
||||
dev->counters += 4;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
dev->types[9] = _DRM_STAT_DMA;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
i810_PCI_IDS
|
||||
};
|
||||
|
@ -79,16 +47,14 @@ static struct drm_driver driver = {
|
|||
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
|
||||
DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
|
||||
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
|
||||
.pretakedown = i810_driver_pretakedown,
|
||||
.prerelease = i810_driver_prerelease,
|
||||
.load = i810_driver_load,
|
||||
.lastclose = i810_driver_lastclose,
|
||||
.preclose = i810_driver_preclose,
|
||||
.device_is_agp = i810_driver_device_is_agp,
|
||||
.release = i810_driver_release,
|
||||
.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
|
||||
.dma_quiescent = i810_driver_dma_quiescent,
|
||||
.reclaim_buffers = i810_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.ioctls = i810_ioctls,
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
|
@ -98,13 +64,19 @@ static struct drm_driver driver = {
|
|||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
}
|
||||
,
|
||||
},
|
||||
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
,
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init i810_init(void)
|
||||
|
|
|
@ -116,9 +116,13 @@ typedef struct drm_i810_private {
|
|||
extern void i810_reclaim_buffers(drm_device_t * dev, struct file *filp);
|
||||
|
||||
extern int i810_driver_dma_quiescent(drm_device_t * dev);
|
||||
extern void i810_driver_release(drm_device_t * dev, struct file *filp);
|
||||
extern void i810_driver_pretakedown(drm_device_t * dev);
|
||||
extern void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp);
|
||||
extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
|
||||
struct file *filp);
|
||||
extern int i810_driver_load(struct drm_device *, unsigned long flags);
|
||||
extern void i810_driver_lastclose(drm_device_t * dev);
|
||||
extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp);
|
||||
extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
|
||||
struct file *filp);
|
||||
extern int i810_driver_device_is_agp(drm_device_t * dev);
|
||||
|
||||
extern drm_ioctl_desc_t i810_ioctls[];
|
||||
|
|
|
@ -116,7 +116,6 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
|
|||
|
||||
static struct file_operations i830_buffer_fops = {
|
||||
.open = drm_open,
|
||||
.flush = drm_flush,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.mmap = i830_mmap_buffers,
|
||||
|
@ -1517,12 +1516,24 @@ static int i830_setparam(struct inode *inode, struct file *filp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
void i830_driver_pretakedown(drm_device_t * dev)
|
||||
int i830_driver_load(drm_device_t *dev, unsigned long flags)
|
||||
{
|
||||
/* i830 has 4 more counters */
|
||||
dev->counters += 4;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
dev->types[9] = _DRM_STAT_DMA;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i830_driver_lastclose(drm_device_t * dev)
|
||||
{
|
||||
i830_dma_cleanup(dev);
|
||||
}
|
||||
|
||||
void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
||||
void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
|
||||
{
|
||||
if (dev->dev_private) {
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -1532,7 +1543,7 @@ void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
|||
}
|
||||
}
|
||||
|
||||
void i830_driver_release(drm_device_t * dev, struct file *filp)
|
||||
void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
|
||||
{
|
||||
i830_reclaim_buffers(dev, filp);
|
||||
}
|
||||
|
@ -1544,20 +1555,20 @@ int i830_driver_dma_quiescent(drm_device_t * dev)
|
|||
}
|
||||
|
||||
drm_ioctl_desc_t i830_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, 1, 0}
|
||||
[DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH}
|
||||
};
|
||||
|
||||
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
|
||||
|
|
|
@ -40,37 +40,6 @@
|
|||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
dev->counters += 4;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
dev->types[9] = _DRM_STAT_DMA;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
i830_PCI_IDS
|
||||
};
|
||||
|
@ -83,12 +52,12 @@ static struct drm_driver driver = {
|
|||
.driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
|
||||
#endif
|
||||
.dev_priv_size = sizeof(drm_i830_buf_priv_t),
|
||||
.pretakedown = i830_driver_pretakedown,
|
||||
.prerelease = i830_driver_prerelease,
|
||||
.load = i830_driver_load,
|
||||
.lastclose = i830_driver_lastclose,
|
||||
.preclose = i830_driver_preclose,
|
||||
.device_is_agp = i830_driver_device_is_agp,
|
||||
.release = i830_driver_release,
|
||||
.reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
|
||||
.dma_quiescent = i830_driver_dma_quiescent,
|
||||
.reclaim_buffers = i830_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
#if USE_IRQS
|
||||
|
@ -97,8 +66,6 @@ static struct drm_driver driver = {
|
|||
.irq_uninstall = i830_driver_irq_uninstall,
|
||||
.irq_handler = i830_driver_irq_handler,
|
||||
#endif
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.ioctls = i830_ioctls,
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
|
@ -108,13 +75,19 @@ static struct drm_driver driver = {
|
|||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
}
|
||||
,
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
},
|
||||
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init i830_init(void)
|
||||
|
|
|
@ -136,10 +136,12 @@ extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
|
|||
extern void i830_driver_irq_preinstall(drm_device_t * dev);
|
||||
extern void i830_driver_irq_postinstall(drm_device_t * dev);
|
||||
extern void i830_driver_irq_uninstall(drm_device_t * dev);
|
||||
extern void i830_driver_pretakedown(drm_device_t * dev);
|
||||
extern void i830_driver_release(drm_device_t * dev, struct file *filp);
|
||||
extern int i830_driver_load(struct drm_device *, unsigned long flags);
|
||||
extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp);
|
||||
extern void i830_driver_lastclose(drm_device_t * dev);
|
||||
extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev,
|
||||
struct file *filp);
|
||||
extern int i830_driver_dma_quiescent(drm_device_t * dev);
|
||||
extern void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp);
|
||||
extern int i830_driver_device_is_agp(drm_device_t * dev);
|
||||
|
||||
#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
|
||||
*/
|
||||
/**************************************************************************
|
||||
*
|
||||
/*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
|
@ -25,7 +24,7 @@
|
|||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
|
@ -196,7 +195,7 @@ static int i915_initialize(drm_device_t * dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int i915_resume(drm_device_t * dev)
|
||||
static int i915_dma_resume(drm_device_t * dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
|
||||
|
@ -253,7 +252,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
|
|||
retcode = i915_dma_cleanup(dev);
|
||||
break;
|
||||
case I915_RESUME_DMA:
|
||||
retcode = i915_resume(dev);
|
||||
retcode = i915_dma_resume(dev);
|
||||
break;
|
||||
default:
|
||||
retcode = -EINVAL;
|
||||
|
@ -654,6 +653,9 @@ static int i915_getparam(DRM_IOCTL_ARGS)
|
|||
case I915_PARAM_ALLOW_BATCHBUFFER:
|
||||
value = dev_priv->allow_batchbuffer ? 1 : 0;
|
||||
break;
|
||||
case I915_PARAM_LAST_DISPATCH:
|
||||
value = READ_BREADCRUMB(dev_priv);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unkown parameter %d\n", param.param);
|
||||
return DRM_ERR(EINVAL);
|
||||
|
@ -699,7 +701,19 @@ static int i915_setparam(DRM_IOCTL_ARGS)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void i915_driver_pretakedown(drm_device_t * dev)
|
||||
int i915_driver_load(drm_device_t *dev, unsigned long flags)
|
||||
{
|
||||
/* i915 has 4 more counters */
|
||||
dev->counters += 4;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
dev->types[9] = _DRM_STAT_DMA;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i915_driver_lastclose(drm_device_t * dev)
|
||||
{
|
||||
if (dev->dev_private) {
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -708,7 +722,7 @@ void i915_driver_pretakedown(drm_device_t * dev)
|
|||
i915_dma_cleanup(dev);
|
||||
}
|
||||
|
||||
void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
||||
void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
|
||||
{
|
||||
if (dev->dev_private) {
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -717,18 +731,18 @@ void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
|||
}
|
||||
|
||||
drm_ioctl_desc_t i915_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, 1, 0}
|
||||
[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}
|
||||
};
|
||||
|
||||
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
/*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
|
@ -23,7 +22,7 @@
|
|||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
*/
|
||||
|
||||
#ifndef _I915_DRM_H_
|
||||
#define _I915_DRM_H_
|
||||
|
@ -152,6 +151,7 @@ typedef struct drm_i915_irq_wait {
|
|||
*/
|
||||
#define I915_PARAM_IRQ_ACTIVE 1
|
||||
#define I915_PARAM_ALLOW_BATCHBUFFER 2
|
||||
#define I915_PARAM_LAST_DISPATCH 3
|
||||
|
||||
typedef struct drm_i915_getparam {
|
||||
int param;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
|
||||
*/
|
||||
/**************************************************************************
|
||||
/*
|
||||
*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
|
@ -25,7 +25,7 @@
|
|||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
|
@ -34,48 +34,22 @@
|
|||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
dev->counters += 4;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
dev->types[9] = _DRM_STAT_DMA;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
i915_PCI_IDS
|
||||
};
|
||||
|
||||
static struct drm_driver driver = {
|
||||
/* don't use mtrr's here, the Xserver or user space app should
|
||||
* deal with them for intel hardware.
|
||||
*/
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
|
||||
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
|
||||
.pretakedown = i915_driver_pretakedown,
|
||||
.prerelease = i915_driver_prerelease,
|
||||
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
|
||||
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.load = i915_driver_load,
|
||||
.lastclose = i915_driver_lastclose,
|
||||
.preclose = i915_driver_preclose,
|
||||
.device_is_agp = i915_driver_device_is_agp,
|
||||
.vblank_wait = i915_driver_vblank_wait,
|
||||
.irq_preinstall = i915_driver_irq_preinstall,
|
||||
.irq_postinstall = i915_driver_irq_postinstall,
|
||||
.irq_uninstall = i915_driver_irq_uninstall,
|
||||
|
@ -83,8 +57,6 @@ static struct drm_driver driver = {
|
|||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.ioctls = i915_ioctls,
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
|
@ -97,11 +69,19 @@ static struct drm_driver driver = {
|
|||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = i915_compat_ioctl,
|
||||
#endif
|
||||
},
|
||||
},
|
||||
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init i915_init(void)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
|
||||
*/
|
||||
/**************************************************************************
|
||||
/*
|
||||
*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
|
@ -25,7 +25,7 @@
|
|||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
*/
|
||||
|
||||
#ifndef _I915_DRV_H_
|
||||
#define _I915_DRV_H_
|
||||
|
@ -37,21 +37,18 @@
|
|||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20040405"
|
||||
#define DRIVER_DATE "20051209"
|
||||
|
||||
/* Interface history:
|
||||
*
|
||||
* 1.1: Original.
|
||||
* 1.2: Add Power Management
|
||||
* 1.3: Add vblank support
|
||||
*/
|
||||
#define DRIVER_MAJOR 1
|
||||
#define DRIVER_MINOR 1
|
||||
#define DRIVER_MINOR 3
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
/* We use our own dma mechanisms, not the drm template code. However,
|
||||
* the shared IRQ code is useful to us:
|
||||
*/
|
||||
#define __HAVE_PM 1
|
||||
|
||||
typedef struct _drm_i915_ring_buffer {
|
||||
int tail_mask;
|
||||
unsigned long Start;
|
||||
|
@ -97,6 +94,7 @@ typedef struct drm_i915_private {
|
|||
int tex_lru_log_granularity;
|
||||
int allow_batchbuffer;
|
||||
struct mem_block *agp_heap;
|
||||
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
|
||||
} drm_i915_private_t;
|
||||
|
||||
extern drm_ioctl_desc_t i915_ioctls[];
|
||||
|
@ -104,14 +102,18 @@ extern int i915_max_ioctl;
|
|||
|
||||
/* i915_dma.c */
|
||||
extern void i915_kernel_lost_context(drm_device_t * dev);
|
||||
extern void i915_driver_pretakedown(drm_device_t * dev);
|
||||
extern void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp);
|
||||
extern int i915_driver_load(struct drm_device *, unsigned long flags);
|
||||
extern void i915_driver_lastclose(drm_device_t * dev);
|
||||
extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
|
||||
extern int i915_driver_device_is_agp(drm_device_t * dev);
|
||||
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
|
||||
/* i915_irq.c */
|
||||
extern int i915_irq_emit(DRM_IOCTL_ARGS);
|
||||
extern int i915_irq_wait(DRM_IOCTL_ARGS);
|
||||
|
||||
extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
|
||||
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
|
||||
extern void i915_driver_irq_preinstall(drm_device_t * dev);
|
||||
extern void i915_driver_irq_postinstall(drm_device_t * dev);
|
||||
|
@ -125,13 +127,10 @@ extern void i915_mem_takedown(struct mem_block **heap);
|
|||
extern void i915_mem_release(drm_device_t * dev,
|
||||
DRMFILE filp, struct mem_block *heap);
|
||||
|
||||
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
|
||||
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
|
||||
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
|
||||
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
|
||||
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
|
||||
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
|
||||
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
|
||||
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
|
||||
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
|
||||
|
||||
#define I915_VERBOSE 0
|
||||
|
||||
|
@ -195,6 +194,13 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
|
|||
#define PPCR 0x61204
|
||||
#define PPCR_ON (1<<0)
|
||||
|
||||
#define DVOB 0x61140
|
||||
#define DVOB_ON (1<<31)
|
||||
#define DVOC 0x61160
|
||||
#define DVOC_ON (1<<31)
|
||||
#define LVDS 0x61180
|
||||
#define LVDS_ON (1<<31)
|
||||
|
||||
#define ADPA 0x61100
|
||||
#define ADPA_DPMS_MASK (~(3<<10))
|
||||
#define ADPA_DPMS_ON (0<<10)
|
||||
|
@ -258,4 +264,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
|
|||
|
||||
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
|
||||
|
||||
#define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5])
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
|
||||
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
|
||||
*/
|
||||
/**************************************************************************
|
||||
*
|
||||
/*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
|
@ -25,16 +24,18 @@
|
|||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "i915_drm.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define USER_INT_FLAG 0x2
|
||||
#define USER_INT_FLAG (1<<1)
|
||||
#define VSYNC_PIPEB_FLAG (1<<5)
|
||||
#define VSYNC_PIPEA_FLAG (1<<7)
|
||||
|
||||
#define MAX_NOPID ((u32)~0)
|
||||
#define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5])
|
||||
|
||||
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
|
||||
{
|
||||
|
@ -43,7 +44,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
|
|||
u16 temp;
|
||||
|
||||
temp = I915_READ16(I915REG_INT_IDENTITY_R);
|
||||
temp &= USER_INT_FLAG;
|
||||
temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG);
|
||||
|
||||
DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
|
||||
|
||||
|
@ -51,7 +52,15 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
|
|||
return IRQ_NONE;
|
||||
|
||||
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
|
||||
DRM_WAKEUP(&dev_priv->irq_queue);
|
||||
|
||||
if (temp & USER_INT_FLAG)
|
||||
DRM_WAKEUP(&dev_priv->irq_queue);
|
||||
|
||||
if (temp & VSYNC_PIPEA_FLAG) {
|
||||
atomic_inc(&dev->vbl_received);
|
||||
DRM_WAKEUP(&dev->vbl_queue);
|
||||
drm_vbl_send_signals(dev);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -102,6 +111,27 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
unsigned int cur_vblank;
|
||||
int ret = 0;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
|
||||
(((cur_vblank = atomic_read(&dev->vbl_received))
|
||||
- *sequence) <= (1<<23)));
|
||||
|
||||
*sequence = cur_vblank;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/* Needs the lock as it touches the ring.
|
||||
*/
|
||||
int i915_irq_emit(DRM_IOCTL_ARGS)
|
||||
|
@ -165,7 +195,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
|
|||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
|
||||
I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG);
|
||||
I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | VSYNC_PIPEA_FLAG);
|
||||
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
|
||||
*/
|
||||
/**************************************************************************
|
||||
*
|
||||
/*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
|
@ -25,7 +24,7 @@
|
|||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
|
|
|
@ -44,7 +44,9 @@
|
|||
#define MGA_DEFAULT_USEC_TIMEOUT 10000
|
||||
#define MGA_FREELIST_DEBUG 0
|
||||
|
||||
static int mga_do_cleanup_dma(drm_device_t * dev);
|
||||
#define MINIMAL_CLEANUP 0
|
||||
#define FULL_CLEANUP 1
|
||||
static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup);
|
||||
|
||||
/* ================================================================
|
||||
* Engine control
|
||||
|
@ -391,7 +393,7 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
|
|||
* DMA initialization, cleanup
|
||||
*/
|
||||
|
||||
int mga_driver_preinit(drm_device_t * dev, unsigned long flags)
|
||||
int mga_driver_load(drm_device_t * dev, unsigned long flags)
|
||||
{
|
||||
drm_mga_private_t *dev_priv;
|
||||
|
||||
|
@ -405,6 +407,14 @@ int mga_driver_preinit(drm_device_t * dev, unsigned long flags)
|
|||
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
|
||||
dev_priv->chipset = flags;
|
||||
|
||||
dev_priv->mmio_base = drm_get_resource_start(dev, 1);
|
||||
dev_priv->mmio_size = drm_get_resource_len(dev, 1);
|
||||
|
||||
dev->counters += 3;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -438,17 +448,19 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
|
|||
drm_buf_desc_t req;
|
||||
drm_agp_mode_t mode;
|
||||
drm_agp_info_t info;
|
||||
drm_agp_buffer_t agp_req;
|
||||
drm_agp_binding_t bind_req;
|
||||
|
||||
/* Acquire AGP. */
|
||||
err = drm_agp_acquire(dev);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to acquire AGP\n");
|
||||
DRM_ERROR("Unable to acquire AGP: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = drm_agp_info(dev, &info);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to get AGP info\n");
|
||||
DRM_ERROR("Unable to get AGP info: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -472,18 +484,24 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
|
|||
}
|
||||
|
||||
/* Allocate and bind AGP memory. */
|
||||
dev_priv->agp_pages = agp_size / PAGE_SIZE;
|
||||
dev_priv->agp_mem = drm_alloc_agp(dev, dev_priv->agp_pages, 0);
|
||||
if (dev_priv->agp_mem == NULL) {
|
||||
dev_priv->agp_pages = 0;
|
||||
agp_req.size = agp_size;
|
||||
agp_req.type = 0;
|
||||
err = drm_agp_alloc(dev, &agp_req);
|
||||
if (err) {
|
||||
dev_priv->agp_size = 0;
|
||||
DRM_ERROR("Unable to allocate %uMB AGP memory\n",
|
||||
dma_bs->agp_size);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return err;
|
||||
}
|
||||
|
||||
dev_priv->agp_size = agp_size;
|
||||
dev_priv->agp_handle = agp_req.handle;
|
||||
|
||||
err = drm_bind_agp(dev_priv->agp_mem, 0);
|
||||
bind_req.handle = agp_req.handle;
|
||||
bind_req.offset = 0;
|
||||
err = drm_agp_bind(dev, &bind_req);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to bind AGP memory\n");
|
||||
DRM_ERROR("Unable to bind AGP memory: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -497,7 +515,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
|
|||
err = drm_addmap(dev, offset, warp_size,
|
||||
_DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to map WARP microcode\n");
|
||||
DRM_ERROR("Unable to map WARP microcode: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -505,7 +523,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
|
|||
err = drm_addmap(dev, offset, dma_bs->primary_size,
|
||||
_DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to map primary DMA region\n");
|
||||
DRM_ERROR("Unable to map primary DMA region: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -513,7 +531,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
|
|||
err = drm_addmap(dev, offset, secondary_size,
|
||||
_DRM_AGP, 0, &dev->agp_buffer_map);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to map secondary DMA region\n");
|
||||
DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -525,15 +543,29 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
|
|||
|
||||
err = drm_addbufs_agp(dev, &req);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to add secondary DMA buffers\n");
|
||||
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
{
|
||||
drm_map_list_t *_entry;
|
||||
unsigned long agp_token = 0;
|
||||
|
||||
list_for_each_entry(_entry, &dev->maplist->head, head) {
|
||||
if (_entry->map == dev->agp_buffer_map)
|
||||
agp_token = _entry->user_token;
|
||||
}
|
||||
if (!agp_token)
|
||||
return -EFAULT;
|
||||
|
||||
dev->agp_buffer_token = agp_token;
|
||||
}
|
||||
|
||||
offset += secondary_size;
|
||||
err = drm_addmap(dev, offset, agp_size - offset,
|
||||
_DRM_AGP, 0, &dev_priv->agp_textures);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to map AGP texture region\n");
|
||||
DRM_ERROR("Unable to map AGP texture region %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -603,7 +635,8 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
|
|||
err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
|
||||
_DRM_READ_ONLY, &dev_priv->warp);
|
||||
if (err != 0) {
|
||||
DRM_ERROR("Unable to create mapping for WARP microcode\n");
|
||||
DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -622,7 +655,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
|
|||
}
|
||||
|
||||
if (err != 0) {
|
||||
DRM_ERROR("Unable to allocate primary DMA region\n");
|
||||
DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
|
||||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
|
||||
|
@ -646,7 +679,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
|
|||
}
|
||||
|
||||
if (bin_count == 0) {
|
||||
DRM_ERROR("Unable to add secondary DMA buffers\n");
|
||||
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -682,7 +715,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
|
|||
err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
|
||||
_DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to map MMIO region\n");
|
||||
DRM_ERROR("Unable to map MMIO region: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -690,7 +723,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
|
|||
_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
|
||||
&dev_priv->status);
|
||||
if (err) {
|
||||
DRM_ERROR("Unable to map status region\n");
|
||||
DRM_ERROR("Unable to map status region: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -708,7 +741,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
|
|||
*/
|
||||
|
||||
if (err) {
|
||||
mga_do_cleanup_dma(dev);
|
||||
mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
|
||||
}
|
||||
|
||||
/* Not only do we want to try and initialized PCI cards for PCI DMA,
|
||||
|
@ -731,35 +764,32 @@ int mga_dma_bootstrap(DRM_IOCTL_ARGS)
|
|||
DRM_DEVICE;
|
||||
drm_mga_dma_bootstrap_t bootstrap;
|
||||
int err;
|
||||
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
|
||||
const drm_mga_private_t *const dev_priv =
|
||||
(drm_mga_private_t *) dev->dev_private;
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(bootstrap,
|
||||
(drm_mga_dma_bootstrap_t __user *) data,
|
||||
sizeof(bootstrap));
|
||||
|
||||
err = mga_do_dma_bootstrap(dev, &bootstrap);
|
||||
if (!err) {
|
||||
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
|
||||
const drm_mga_private_t *const dev_priv =
|
||||
(drm_mga_private_t *) dev->dev_private;
|
||||
|
||||
if (dev_priv->agp_textures != NULL) {
|
||||
bootstrap.texture_handle =
|
||||
dev_priv->agp_textures->offset;
|
||||
bootstrap.texture_size = dev_priv->agp_textures->size;
|
||||
} else {
|
||||
bootstrap.texture_handle = 0;
|
||||
bootstrap.texture_size = 0;
|
||||
}
|
||||
|
||||
bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
|
||||
if (DRM_COPY_TO_USER((void __user *)data, &bootstrap,
|
||||
sizeof(bootstrap))) {
|
||||
err = DRM_ERR(EFAULT);
|
||||
}
|
||||
} else {
|
||||
mga_do_cleanup_dma(dev);
|
||||
if (err) {
|
||||
mga_do_cleanup_dma(dev, FULL_CLEANUP);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (dev_priv->agp_textures != NULL) {
|
||||
bootstrap.texture_handle = dev_priv->agp_textures->offset;
|
||||
bootstrap.texture_size = dev_priv->agp_textures->size;
|
||||
} else {
|
||||
bootstrap.texture_handle = 0;
|
||||
bootstrap.texture_size = 0;
|
||||
}
|
||||
|
||||
bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
|
||||
DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data,
|
||||
bootstrap, sizeof(bootstrap));
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -853,13 +883,13 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
|
|||
|
||||
ret = mga_warp_install_microcode(dev_priv);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("failed to install WARP ucode!\n");
|
||||
DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = mga_warp_init(dev_priv);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("failed to init WARP engine!\n");
|
||||
DRM_ERROR("failed to init WARP engine!: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -904,7 +934,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int mga_do_cleanup_dma(drm_device_t * dev)
|
||||
static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup)
|
||||
{
|
||||
int err = 0;
|
||||
DRM_DEBUG("\n");
|
||||
|
@ -932,31 +962,39 @@ static int mga_do_cleanup_dma(drm_device_t * dev)
|
|||
|
||||
if (dev_priv->used_new_dma_init) {
|
||||
#if __OS_HAS_AGP
|
||||
if (dev_priv->agp_mem != NULL) {
|
||||
dev_priv->agp_textures = NULL;
|
||||
drm_unbind_agp(dev_priv->agp_mem);
|
||||
if (dev_priv->agp_handle != 0) {
|
||||
drm_agp_binding_t unbind_req;
|
||||
drm_agp_buffer_t free_req;
|
||||
|
||||
drm_free_agp(dev_priv->agp_mem,
|
||||
dev_priv->agp_pages);
|
||||
dev_priv->agp_pages = 0;
|
||||
dev_priv->agp_mem = NULL;
|
||||
unbind_req.handle = dev_priv->agp_handle;
|
||||
drm_agp_unbind(dev, &unbind_req);
|
||||
|
||||
free_req.handle = dev_priv->agp_handle;
|
||||
drm_agp_free(dev, &free_req);
|
||||
|
||||
dev_priv->agp_textures = NULL;
|
||||
dev_priv->agp_size = 0;
|
||||
dev_priv->agp_handle = 0;
|
||||
}
|
||||
|
||||
if ((dev->agp != NULL) && dev->agp->acquired) {
|
||||
err = drm_agp_release(dev);
|
||||
}
|
||||
#endif
|
||||
dev_priv->used_new_dma_init = 0;
|
||||
}
|
||||
|
||||
dev_priv->warp = NULL;
|
||||
dev_priv->primary = NULL;
|
||||
dev_priv->mmio = NULL;
|
||||
dev_priv->status = NULL;
|
||||
dev_priv->sarea = NULL;
|
||||
dev_priv->sarea_priv = NULL;
|
||||
dev->agp_buffer_map = NULL;
|
||||
|
||||
if (full_cleanup) {
|
||||
dev_priv->mmio = NULL;
|
||||
dev_priv->status = NULL;
|
||||
dev_priv->used_new_dma_init = 0;
|
||||
}
|
||||
|
||||
memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
|
||||
dev_priv->warp_pipe = 0;
|
||||
memset(dev_priv->warp_pipe_phys, 0,
|
||||
|
@ -967,7 +1005,7 @@ static int mga_do_cleanup_dma(drm_device_t * dev)
|
|||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mga_dma_init(DRM_IOCTL_ARGS)
|
||||
|
@ -985,11 +1023,11 @@ int mga_dma_init(DRM_IOCTL_ARGS)
|
|||
case MGA_INIT_DMA:
|
||||
err = mga_do_init_dma(dev, &init);
|
||||
if (err) {
|
||||
(void)mga_do_cleanup_dma(dev);
|
||||
(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
|
||||
}
|
||||
return err;
|
||||
case MGA_CLEANUP_DMA:
|
||||
return mga_do_cleanup_dma(dev);
|
||||
return mga_do_cleanup_dma(dev, FULL_CLEANUP);
|
||||
}
|
||||
|
||||
return DRM_ERR(EINVAL);
|
||||
|
@ -1118,7 +1156,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS)
|
|||
/**
|
||||
* Called just before the module is unloaded.
|
||||
*/
|
||||
int mga_driver_postcleanup(drm_device_t * dev)
|
||||
int mga_driver_unload(drm_device_t * dev)
|
||||
{
|
||||
drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
|
||||
dev->dev_private = NULL;
|
||||
|
@ -1129,9 +1167,9 @@ int mga_driver_postcleanup(drm_device_t * dev)
|
|||
/**
|
||||
* Called when the last opener of the device is closed.
|
||||
*/
|
||||
void mga_driver_pretakedown(drm_device_t * dev)
|
||||
void mga_driver_lastclose(drm_device_t * dev)
|
||||
{
|
||||
mga_do_cleanup_dma(dev);
|
||||
mga_do_cleanup_dma(dev, FULL_CLEANUP);
|
||||
}
|
||||
|
||||
int mga_driver_dma_quiescent(drm_device_t * dev)
|
||||
|
|
|
@ -38,41 +38,6 @@
|
|||
#include "drm_pciids.h"
|
||||
|
||||
static int mga_driver_device_is_agp(drm_device_t * dev);
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
drm_mga_private_t *const dev_priv =
|
||||
(drm_mga_private_t *) dev->dev_private;
|
||||
|
||||
dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
|
||||
dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
|
||||
|
||||
dev->counters += 3;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
mga_PCI_IDS
|
||||
|
@ -80,12 +45,12 @@ static struct pci_device_id pciidlist[] = {
|
|||
|
||||
static struct drm_driver driver = {
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
|
||||
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
|
||||
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
|
||||
DRIVER_IRQ_VBL,
|
||||
.preinit = mga_driver_preinit,
|
||||
.postcleanup = mga_driver_postcleanup,
|
||||
.pretakedown = mga_driver_pretakedown,
|
||||
.load = mga_driver_load,
|
||||
.unload = mga_driver_unload,
|
||||
.lastclose = mga_driver_lastclose,
|
||||
.dma_quiescent = mga_driver_dma_quiescent,
|
||||
.device_is_agp = mga_driver_device_is_agp,
|
||||
.vblank_wait = mga_driver_vblank_wait,
|
||||
|
@ -96,8 +61,6 @@ static struct drm_driver driver = {
|
|||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.ioctls = mga_ioctls,
|
||||
.dma_ioctl = mga_dma_buffers,
|
||||
.fops = {
|
||||
|
@ -113,9 +76,16 @@ static struct drm_driver driver = {
|
|||
#endif
|
||||
},
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init mga_init(void)
|
||||
|
|
|
@ -38,11 +38,11 @@
|
|||
|
||||
#define DRIVER_NAME "mga"
|
||||
#define DRIVER_DESC "Matrox G200/G400"
|
||||
#define DRIVER_DATE "20050607"
|
||||
#define DRIVER_DATE "20051102"
|
||||
|
||||
#define DRIVER_MAJOR 3
|
||||
#define DRIVER_MINOR 2
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
#define DRIVER_PATCHLEVEL 1
|
||||
|
||||
typedef struct drm_mga_primary_buffer {
|
||||
u8 *start;
|
||||
|
@ -144,22 +144,22 @@ typedef struct drm_mga_private {
|
|||
drm_local_map_t *primary;
|
||||
drm_local_map_t *agp_textures;
|
||||
|
||||
DRM_AGP_MEM *agp_mem;
|
||||
unsigned int agp_pages;
|
||||
unsigned long agp_handle;
|
||||
unsigned int agp_size;
|
||||
} drm_mga_private_t;
|
||||
|
||||
extern drm_ioctl_desc_t mga_ioctls[];
|
||||
extern int mga_max_ioctl;
|
||||
|
||||
/* mga_dma.c */
|
||||
extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
|
||||
extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
|
||||
extern int mga_dma_init(DRM_IOCTL_ARGS);
|
||||
extern int mga_dma_flush(DRM_IOCTL_ARGS);
|
||||
extern int mga_dma_reset(DRM_IOCTL_ARGS);
|
||||
extern int mga_dma_buffers(DRM_IOCTL_ARGS);
|
||||
extern int mga_driver_postcleanup(drm_device_t * dev);
|
||||
extern void mga_driver_pretakedown(drm_device_t * dev);
|
||||
extern int mga_driver_load(drm_device_t *dev, unsigned long flags);
|
||||
extern int mga_driver_unload(drm_device_t * dev);
|
||||
extern void mga_driver_lastclose(drm_device_t * dev);
|
||||
extern int mga_driver_dma_quiescent(drm_device_t * dev);
|
||||
|
||||
extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
|
||||
|
|
|
@ -1127,19 +1127,19 @@ static int mga_wait_fence(DRM_IOCTL_ARGS)
|
|||
}
|
||||
|
||||
drm_ioctl_desc_t mga_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
};
|
||||
|
||||
int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
|
||||
/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
|
||||
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
|
||||
*
|
||||
*/
|
||||
/*
|
||||
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
|
@ -559,7 +560,8 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
|
|||
if (dev_priv->is_pci) {
|
||||
#endif
|
||||
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
|
||||
dev_priv->gart_info.addr = dev_priv->gart_info.bus_addr = 0;
|
||||
dev_priv->gart_info.addr = NULL;
|
||||
dev_priv->gart_info.bus_addr = 0;
|
||||
dev_priv->gart_info.is_pcie = 0;
|
||||
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
|
||||
DRM_ERROR("failed to init PCI GART!\n");
|
||||
|
@ -601,15 +603,16 @@ int r128_do_cleanup_cce(drm_device_t * dev)
|
|||
drm_core_ioremapfree(dev_priv->cce_ring, dev);
|
||||
if (dev_priv->ring_rptr != NULL)
|
||||
drm_core_ioremapfree(dev_priv->ring_rptr, dev);
|
||||
if (dev->agp_buffer_map != NULL)
|
||||
if (dev->agp_buffer_map != NULL) {
|
||||
drm_core_ioremapfree(dev->agp_buffer_map, dev);
|
||||
dev->agp_buffer_map = NULL;
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
if (dev_priv->gart_info.bus_addr)
|
||||
if (!drm_ati_pcigart_cleanup(dev,
|
||||
&dev_priv->
|
||||
gart_info))
|
||||
&dev_priv->gart_info))
|
||||
DRM_ERROR
|
||||
("failed to cleanup PCI GART!\n");
|
||||
}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
|
||||
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
|
||||
*
|
||||
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
*/
|
||||
/* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All rights reserved.
|
||||
*
|
||||
|
|
|
@ -37,31 +37,6 @@
|
|||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
r128_PCI_IDS
|
||||
};
|
||||
|
@ -72,8 +47,8 @@ static struct drm_driver driver = {
|
|||
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
|
||||
DRIVER_IRQ_VBL,
|
||||
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
|
||||
.prerelease = r128_driver_prerelease,
|
||||
.pretakedown = r128_driver_pretakedown,
|
||||
.preclose = r128_driver_preclose,
|
||||
.lastclose = r128_driver_lastclose,
|
||||
.vblank_wait = r128_driver_vblank_wait,
|
||||
.irq_preinstall = r128_driver_irq_preinstall,
|
||||
.irq_postinstall = r128_driver_irq_postinstall,
|
||||
|
@ -82,8 +57,6 @@ static struct drm_driver driver = {
|
|||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.ioctls = r128_ioctls,
|
||||
.dma_ioctl = r128_cce_buffers,
|
||||
.fops = {
|
||||
|
@ -97,12 +70,19 @@ static struct drm_driver driver = {
|
|||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = r128_compat_ioctl,
|
||||
#endif
|
||||
}
|
||||
,
|
||||
},
|
||||
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init r128_init(void)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
|
||||
* Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
*/
|
||||
/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All rights reserved.
|
||||
*
|
||||
|
@ -154,8 +154,8 @@ extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
|
|||
extern void r128_driver_irq_preinstall(drm_device_t * dev);
|
||||
extern void r128_driver_irq_postinstall(drm_device_t * dev);
|
||||
extern void r128_driver_irq_uninstall(drm_device_t * dev);
|
||||
extern void r128_driver_pretakedown(drm_device_t * dev);
|
||||
extern void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp);
|
||||
extern void r128_driver_lastclose(drm_device_t * dev);
|
||||
extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp);
|
||||
|
||||
extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*-
|
||||
*
|
||||
/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
|
||||
/*
|
||||
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
|
||||
*
|
||||
* The Weather Channel (TM) funded Tungsten Graphics to develop the
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/* r128_state.c -- State support for r128 -*- linux-c -*-
|
||||
* Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
|
||||
*
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
*/
|
||||
/* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
@ -1674,7 +1674,7 @@ static int r128_getparam(DRM_IOCTL_ARGS)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
||||
void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
|
||||
{
|
||||
if (dev->dev_private) {
|
||||
drm_r128_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -1684,29 +1684,29 @@ void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
|||
}
|
||||
}
|
||||
|
||||
void r128_driver_pretakedown(drm_device_t * dev)
|
||||
void r128_driver_lastclose(drm_device_t * dev)
|
||||
{
|
||||
r128_do_cleanup_cce(dev);
|
||||
}
|
||||
|
||||
drm_ioctl_desc_t r128_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
|
||||
};
|
||||
|
||||
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
|
||||
|
|
|
@ -52,8 +52,8 @@ static const int r300_cliprect_cntl[4] = {
|
|||
* Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
|
||||
* buffer, starting with index n.
|
||||
*/
|
||||
static int r300_emit_cliprects(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf, int n)
|
||||
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf, int n)
|
||||
{
|
||||
drm_clip_rect_t box;
|
||||
int nr;
|
||||
|
@ -216,6 +216,7 @@ void r300_init_reg_flags(void)
|
|||
ADD_RANGE(R300_TX_UNK1_0, 16);
|
||||
ADD_RANGE(R300_TX_SIZE_0, 16);
|
||||
ADD_RANGE(R300_TX_FORMAT_0, 16);
|
||||
ADD_RANGE(R300_TX_PITCH_0, 16);
|
||||
/* Texture offset is dangerous and needs more checking */
|
||||
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
|
||||
ADD_RANGE(R300_TX_UNK4_0, 16);
|
||||
|
@ -242,7 +243,7 @@ static __inline__ int r300_check_range(unsigned reg, int count)
|
|||
|
||||
/* we expect offsets passed to the framebuffer to be either within video memory or
|
||||
within AGP space */
|
||||
static __inline__ int r300_check_offset(drm_radeon_private_t * dev_priv,
|
||||
static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
|
||||
u32 offset)
|
||||
{
|
||||
/* we realy want to check against end of video aperture
|
||||
|
@ -317,8 +318,8 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
|
|||
*
|
||||
* Note that checks are performed on contents and addresses of the registers
|
||||
*/
|
||||
static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf,
|
||||
static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
drm_r300_cmd_header_t header)
|
||||
{
|
||||
int reg;
|
||||
|
@ -363,8 +364,8 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
|
|||
* the graphics card.
|
||||
* Called by r300_do_cp_cmdbuf.
|
||||
*/
|
||||
static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf,
|
||||
static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
drm_r300_cmd_header_t header)
|
||||
{
|
||||
int sz;
|
||||
|
@ -400,8 +401,8 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
|
|||
* Emit a clear packet from userspace.
|
||||
* Called by r300_emit_packet3.
|
||||
*/
|
||||
static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf)
|
||||
static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
RING_LOCALS;
|
||||
|
||||
|
@ -421,8 +422,8 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf,
|
||||
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
u32 header)
|
||||
{
|
||||
int count, i, k;
|
||||
|
@ -489,8 +490,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf)
|
||||
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
u32 header;
|
||||
int count;
|
||||
|
@ -554,8 +555,8 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
|
|||
* Emit a rendering packet3 from userspace.
|
||||
* Called by r300_do_cp_cmdbuf.
|
||||
*/
|
||||
static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf,
|
||||
static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
drm_r300_cmd_header_t header)
|
||||
{
|
||||
int n;
|
||||
|
@ -623,7 +624,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
|
|||
/**
|
||||
* Emit the sequence to pacify R300.
|
||||
*/
|
||||
static __inline__ void r300_pacify(drm_radeon_private_t * dev_priv)
|
||||
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
|
||||
{
|
||||
RING_LOCALS;
|
||||
|
||||
|
@ -657,9 +658,10 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
|
|||
* commands on the DMA ring buffer.
|
||||
* Called by the ioctl handler function radeon_cp_cmdbuf.
|
||||
*/
|
||||
int r300_do_cp_cmdbuf(drm_device_t * dev,
|
||||
int r300_do_cp_cmdbuf(drm_device_t *dev,
|
||||
DRMFILE filp,
|
||||
drm_file_t * filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf)
|
||||
drm_file_t *filp_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
|
|
|
@ -797,6 +797,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
|
|||
|
||||
# define R300_TX_FORMAT_YUV_MODE 0x00800000
|
||||
|
||||
#define R300_TX_PITCH_0 0x4500
|
||||
#define R300_TX_OFFSET_0 0x4540
|
||||
/* BEGIN: Guess from R200 */
|
||||
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* radeon_cp.c -- CP support for Radeon -*- linux-c -*-
|
||||
*
|
||||
/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
|
||||
/*
|
||||
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
|
||||
* All Rights Reserved.
|
||||
|
@ -824,7 +824,7 @@ static int RADEON_READ_PLL(drm_device_t * dev, int addr)
|
|||
return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
|
||||
}
|
||||
|
||||
static int RADEON_READ_PCIE(drm_radeon_private_t * dev_priv, int addr)
|
||||
static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
|
||||
{
|
||||
RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
|
||||
return RADEON_READ(RADEON_PCIE_DATA);
|
||||
|
@ -1125,7 +1125,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
|
|||
| (dev_priv->fb_location >> 16));
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (!dev_priv->is_pci) {
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
|
||||
(((dev_priv->gart_vm_start - 1 +
|
||||
dev_priv->gart_size) & 0xffff0000) |
|
||||
|
@ -1152,7 +1152,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
|
|||
dev_priv->ring.tail = cur_read_ptr;
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (!dev_priv->is_pci) {
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
/* set RADEON_AGP_BASE here instead of relying on X from user space */
|
||||
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
|
||||
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
|
||||
|
@ -1278,13 +1278,15 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
|
|||
/* Enable or disable PCI GART on the chip */
|
||||
static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
|
||||
{
|
||||
u32 tmp = RADEON_READ(RADEON_AIC_CNTL);
|
||||
u32 tmp;
|
||||
|
||||
if (dev_priv->flags & CHIP_IS_PCIE) {
|
||||
radeon_set_pciegart(dev_priv, on);
|
||||
return;
|
||||
}
|
||||
|
||||
tmp = RADEON_READ(RADEON_AIC_CNTL);
|
||||
|
||||
if (on) {
|
||||
RADEON_WRITE(RADEON_AIC_CNTL,
|
||||
tmp | RADEON_PCIGART_TRANSLATE_EN);
|
||||
|
@ -1312,13 +1314,17 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
|
|||
static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
dev_priv->is_pci = init->is_pci;
|
||||
if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
|
||||
{
|
||||
DRM_DEBUG("Forcing AGP card to PCI mode\n");
|
||||
dev_priv->flags &= ~CHIP_IS_AGP;
|
||||
}
|
||||
|
||||
if (dev_priv->is_pci && !dev->sg) {
|
||||
if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) {
|
||||
DRM_ERROR("PCI GART memory not allocated!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1327,12 +1333,11 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
if (dev_priv->usec_timeout < 1 ||
|
||||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
|
||||
DRM_DEBUG("TIMEOUT problem!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
switch (init->func) {
|
||||
switch(init->func) {
|
||||
case RADEON_INIT_R200_CP:
|
||||
dev_priv->microcode_version = UCODE_R200;
|
||||
break;
|
||||
|
@ -1353,7 +1358,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
|
||||
(init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
|
||||
DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1416,8 +1420,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
|
||||
DRM_GETSAREA();
|
||||
|
||||
dev_priv->fb_offset = init->fb_offset;
|
||||
dev_priv->mmio_offset = init->mmio_offset;
|
||||
dev_priv->ring_offset = init->ring_offset;
|
||||
dev_priv->ring_rptr_offset = init->ring_rptr_offset;
|
||||
dev_priv->buffers_offset = init->buffers_offset;
|
||||
|
@ -1425,29 +1427,19 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
|
||||
if (!dev_priv->sarea) {
|
||||
DRM_ERROR("could not find sarea!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if (!dev_priv->mmio) {
|
||||
DRM_ERROR("could not find mmio region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
|
||||
if (!dev_priv->cp_ring) {
|
||||
DRM_ERROR("could not find cp ring region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
|
||||
if (!dev_priv->ring_rptr) {
|
||||
DRM_ERROR("could not find ring read pointer!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1455,7 +1447,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if (!dev->agp_buffer_map) {
|
||||
DRM_ERROR("could not find dma buffer region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1465,7 +1456,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
drm_core_findmap(dev, init->gart_textures_offset);
|
||||
if (!dev_priv->gart_textures) {
|
||||
DRM_ERROR("could not find GART texture region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1476,7 +1466,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
init->sarea_priv_offset);
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (!dev_priv->is_pci) {
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
drm_core_ioremap(dev_priv->cp_ring, dev);
|
||||
drm_core_ioremap(dev_priv->ring_rptr, dev);
|
||||
drm_core_ioremap(dev->agp_buffer_map, dev);
|
||||
|
@ -1484,7 +1474,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
!dev_priv->ring_rptr->handle ||
|
||||
!dev->agp_buffer_map->handle) {
|
||||
DRM_ERROR("could not find ioremap agp regions!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1525,7 +1514,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (!dev_priv->is_pci)
|
||||
if (dev_priv->flags & CHIP_IS_AGP)
|
||||
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
|
||||
- dev->agp->base
|
||||
+ dev_priv->gart_vm_start);
|
||||
|
@ -1551,7 +1540,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (!dev_priv->is_pci) {
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
/* Turn off PCI GART */
|
||||
radeon_set_pcigart(dev_priv, 0);
|
||||
} else
|
||||
|
@ -1561,25 +1550,28 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
if (dev_priv->pcigart_offset) {
|
||||
dev_priv->gart_info.bus_addr =
|
||||
dev_priv->pcigart_offset + dev_priv->fb_location;
|
||||
dev_priv->gart_info.mapping.offset =
|
||||
dev_priv->gart_info.bus_addr;
|
||||
dev_priv->gart_info.mapping.size =
|
||||
RADEON_PCIGART_TABLE_SIZE;
|
||||
|
||||
drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
|
||||
dev_priv->gart_info.addr =
|
||||
(unsigned long)drm_ioremap(dev_priv->gart_info.
|
||||
bus_addr,
|
||||
RADEON_PCIGART_TABLE_SIZE,
|
||||
dev);
|
||||
dev_priv->gart_info.mapping.handle;
|
||||
|
||||
dev_priv->gart_info.is_pcie =
|
||||
!!(dev_priv->flags & CHIP_IS_PCIE);
|
||||
dev_priv->gart_info.gart_table_location =
|
||||
DRM_ATI_GART_FB;
|
||||
|
||||
DRM_DEBUG("Setting phys_pci_gart to %08lX %08lX\n",
|
||||
DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
|
||||
dev_priv->gart_info.addr,
|
||||
dev_priv->pcigart_offset);
|
||||
} else {
|
||||
dev_priv->gart_info.gart_table_location =
|
||||
DRM_ATI_GART_MAIN;
|
||||
dev_priv->gart_info.addr =
|
||||
dev_priv->gart_info.bus_addr = 0;
|
||||
dev_priv->gart_info.addr = NULL;
|
||||
dev_priv->gart_info.bus_addr = 0;
|
||||
if (dev_priv->flags & CHIP_IS_PCIE) {
|
||||
DRM_ERROR
|
||||
("Cannot use PCI Express without GART in FB memory\n");
|
||||
|
@ -1590,7 +1582,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
|
||||
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
|
||||
DRM_ERROR("failed to init PCI GART!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
|
@ -1604,8 +1595,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
|
|||
|
||||
dev_priv->last_buf = 0;
|
||||
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
||||
radeon_do_engine_reset(dev);
|
||||
|
||||
return 0;
|
||||
|
@ -1624,11 +1613,15 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
|
|||
drm_irq_uninstall(dev);
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (!dev_priv->is_pci) {
|
||||
if (dev_priv->cp_ring != NULL)
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
if (dev_priv->cp_ring != NULL) {
|
||||
drm_core_ioremapfree(dev_priv->cp_ring, dev);
|
||||
if (dev_priv->ring_rptr != NULL)
|
||||
dev_priv->cp_ring = NULL;
|
||||
}
|
||||
if (dev_priv->ring_rptr != NULL) {
|
||||
drm_core_ioremapfree(dev_priv->ring_rptr, dev);
|
||||
dev_priv->ring_rptr = NULL;
|
||||
}
|
||||
if (dev->agp_buffer_map != NULL) {
|
||||
drm_core_ioremapfree(dev->agp_buffer_map, dev);
|
||||
dev->agp_buffer_map = NULL;
|
||||
|
@ -1636,17 +1629,20 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
|
|||
} else
|
||||
#endif
|
||||
{
|
||||
if (dev_priv->gart_info.bus_addr)
|
||||
|
||||
if (dev_priv->gart_info.bus_addr) {
|
||||
/* Turn off PCI GART */
|
||||
radeon_set_pcigart(dev_priv, 0);
|
||||
if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
|
||||
DRM_ERROR("failed to cleanup PCI GART!\n");
|
||||
}
|
||||
|
||||
if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
|
||||
drm_ioremapfree((void *)dev_priv->gart_info.addr,
|
||||
RADEON_PCIGART_TABLE_SIZE, dev);
|
||||
if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
|
||||
{
|
||||
drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
|
||||
dev_priv->gart_info.addr = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* only clear to the start of flags */
|
||||
memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
|
||||
|
||||
|
@ -1672,7 +1668,7 @@ static int radeon_do_resume_cp(drm_device_t * dev)
|
|||
DRM_DEBUG("Starting radeon_do_resume_cp()\n");
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (!dev_priv->is_pci) {
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
/* Turn off PCI GART */
|
||||
radeon_set_pcigart(dev_priv, 0);
|
||||
} else
|
||||
|
@ -2103,7 +2099,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
|
||||
int radeon_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv;
|
||||
int ret = 0;
|
||||
|
@ -2136,11 +2132,14 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
|
|||
dev_priv->flags |= CHIP_IS_PCIE;
|
||||
|
||||
DRM_DEBUG("%s card detected\n",
|
||||
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
|
||||
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));
|
||||
return ret;
|
||||
}
|
||||
|
||||
int radeon_presetup(struct drm_device *dev)
|
||||
/* Create mappings for registers and framebuffer so userland doesn't necessarily
|
||||
* have to find them.
|
||||
*/
|
||||
int radeon_driver_firstopen(struct drm_device *dev)
|
||||
{
|
||||
int ret;
|
||||
drm_local_map_t *map;
|
||||
|
@ -2161,12 +2160,11 @@ int radeon_presetup(struct drm_device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int radeon_driver_postcleanup(struct drm_device *dev)
|
||||
int radeon_driver_unload(struct drm_device *dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
|
||||
|
||||
dev->dev_private = NULL;
|
||||
|
|
|
@ -624,6 +624,11 @@ typedef struct drm_radeon_indirect {
|
|||
int discard;
|
||||
} drm_radeon_indirect_t;
|
||||
|
||||
/* enum for card type parameters */
|
||||
#define RADEON_CARD_PCI 0
|
||||
#define RADEON_CARD_AGP 1
|
||||
#define RADEON_CARD_PCIE 2
|
||||
|
||||
/* 1.3: An ioctl to get parameters that aren't available to the 3d
|
||||
* client any other way.
|
||||
*/
|
||||
|
@ -640,6 +645,7 @@ typedef struct drm_radeon_indirect {
|
|||
#define RADEON_PARAM_SAREA_HANDLE 9
|
||||
#define RADEON_PARAM_GART_TEX_HANDLE 10
|
||||
#define RADEON_PARAM_SCRATCH_OFFSET 11
|
||||
#define RADEON_PARAM_CARD_TYPE 12
|
||||
|
||||
typedef struct drm_radeon_getparam {
|
||||
int param;
|
||||
|
|
|
@ -42,29 +42,15 @@ int radeon_no_wb;
|
|||
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n");
|
||||
module_param_named(no_wb, radeon_no_wb, int, 0444);
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
static int dri_library_name(struct drm_device *dev, char *buf)
|
||||
{
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
int family = dev_priv->flags & CHIP_FAMILY_MASK;
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n",
|
||||
(family < CHIP_R200) ? "radeon" :
|
||||
((family < CHIP_R300) ? "r200" :
|
||||
"r300"));
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
|
@ -77,23 +63,22 @@ static struct drm_driver driver = {
|
|||
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
|
||||
DRIVER_IRQ_VBL,
|
||||
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
|
||||
.preinit = radeon_driver_preinit,
|
||||
.presetup = radeon_presetup,
|
||||
.postcleanup = radeon_driver_postcleanup,
|
||||
.prerelease = radeon_driver_prerelease,
|
||||
.pretakedown = radeon_driver_pretakedown,
|
||||
.open_helper = radeon_driver_open_helper,
|
||||
.load = radeon_driver_load,
|
||||
.firstopen = radeon_driver_firstopen,
|
||||
.open = radeon_driver_open,
|
||||
.preclose = radeon_driver_preclose,
|
||||
.postclose = radeon_driver_postclose,
|
||||
.lastclose = radeon_driver_lastclose,
|
||||
.unload = radeon_driver_unload,
|
||||
.vblank_wait = radeon_driver_vblank_wait,
|
||||
.dri_library_name = dri_library_name,
|
||||
.irq_preinstall = radeon_driver_irq_preinstall,
|
||||
.irq_postinstall = radeon_driver_irq_postinstall,
|
||||
.irq_uninstall = radeon_driver_irq_uninstall,
|
||||
.irq_handler = radeon_driver_irq_handler,
|
||||
.free_filp_priv = radeon_driver_free_filp_priv,
|
||||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.ioctls = radeon_ioctls,
|
||||
.dma_ioctl = radeon_cp_buffers,
|
||||
.fops = {
|
||||
|
@ -107,12 +92,19 @@ static struct drm_driver driver = {
|
|||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = radeon_compat_ioctl,
|
||||
#endif
|
||||
}
|
||||
,
|
||||
},
|
||||
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init radeon_init(void)
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
|
||||
#define DRIVER_NAME "radeon"
|
||||
#define DRIVER_DESC "ATI Radeon"
|
||||
#define DRIVER_DATE "20050911"
|
||||
#define DRIVER_DATE "20051229"
|
||||
|
||||
/* Interface history:
|
||||
*
|
||||
|
@ -73,7 +73,7 @@
|
|||
* 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
|
||||
* and GL_EXT_blend_[func|equation]_separate on r200
|
||||
* 1.12- Add R300 CP microcode support - this just loads the CP on r300
|
||||
* (No 3D support yet - just microcode loading)
|
||||
* (No 3D support yet - just microcode loading).
|
||||
* 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
|
||||
* - Add hyperz support, add hyperz flags to clear ioctl.
|
||||
* 1.14- Add support for color tiling
|
||||
|
@ -88,14 +88,13 @@
|
|||
* R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
|
||||
* (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
|
||||
* 1.19- Add support for gart table in FB memory and PCIE r300
|
||||
* 1.20- Add support for r300 texrect
|
||||
* 1.21- Add support for card type getparam
|
||||
*/
|
||||
#define DRIVER_MAJOR 1
|
||||
#define DRIVER_MINOR 19
|
||||
#define DRIVER_MINOR 21
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
|
||||
#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
|
||||
|
||||
/*
|
||||
* Radeon chip families
|
||||
*/
|
||||
|
@ -103,8 +102,8 @@ enum radeon_family {
|
|||
CHIP_R100,
|
||||
CHIP_RS100,
|
||||
CHIP_RV100,
|
||||
CHIP_R200,
|
||||
CHIP_RV200,
|
||||
CHIP_R200,
|
||||
CHIP_RS200,
|
||||
CHIP_R250,
|
||||
CHIP_RS250,
|
||||
|
@ -138,6 +137,9 @@ enum radeon_chip_flags {
|
|||
CHIP_IS_PCIE = 0x00200000UL,
|
||||
};
|
||||
|
||||
#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
|
||||
#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
|
||||
|
||||
typedef struct drm_radeon_freelist {
|
||||
unsigned int age;
|
||||
drm_buf_t *buf;
|
||||
|
@ -245,8 +247,6 @@ typedef struct drm_radeon_private {
|
|||
|
||||
drm_radeon_depth_clear_t depth_clear;
|
||||
|
||||
unsigned long fb_offset;
|
||||
unsigned long mmio_offset;
|
||||
unsigned long ring_offset;
|
||||
unsigned long ring_rptr_offset;
|
||||
unsigned long buffers_offset;
|
||||
|
@ -273,7 +273,6 @@ typedef struct drm_radeon_private {
|
|||
|
||||
/* starting from here on, data is preserved accross an open */
|
||||
uint32_t flags; /* see radeon_chip_flags */
|
||||
int is_pci;
|
||||
} drm_radeon_private_t;
|
||||
|
||||
typedef struct drm_radeon_buf_priv {
|
||||
|
@ -330,17 +329,14 @@ extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
|
|||
extern void radeon_driver_irq_preinstall(drm_device_t * dev);
|
||||
extern void radeon_driver_irq_postinstall(drm_device_t * dev);
|
||||
extern void radeon_driver_irq_uninstall(drm_device_t * dev);
|
||||
extern void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp);
|
||||
extern void radeon_driver_pretakedown(drm_device_t * dev);
|
||||
extern int radeon_driver_open_helper(drm_device_t * dev,
|
||||
drm_file_t * filp_priv);
|
||||
extern void radeon_driver_free_filp_priv(drm_device_t * dev,
|
||||
drm_file_t * filp_priv);
|
||||
|
||||
extern int radeon_preinit(struct drm_device *dev, unsigned long flags);
|
||||
extern int radeon_postinit(struct drm_device *dev, unsigned long flags);
|
||||
extern int radeon_postcleanup(struct drm_device *dev);
|
||||
|
||||
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
|
||||
extern int radeon_driver_unload(struct drm_device *dev);
|
||||
extern int radeon_driver_firstopen(struct drm_device *dev);
|
||||
extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp);
|
||||
extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp);
|
||||
extern void radeon_driver_lastclose(drm_device_t * dev);
|
||||
extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv);
|
||||
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
|
||||
|
@ -364,6 +360,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
|
|||
*/
|
||||
|
||||
#define RADEON_AGP_COMMAND 0x0f60
|
||||
#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
|
||||
# define RADEON_AGP_ENABLE (1<<8)
|
||||
#define RADEON_AUX_SCISSOR_CNTL 0x26f0
|
||||
# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
|
||||
# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
|
||||
|
@ -651,6 +649,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
|
|||
|
||||
#define RADEON_WAIT_UNTIL 0x1720
|
||||
# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
|
||||
# define RADEON_WAIT_2D_IDLE (1 << 14)
|
||||
# define RADEON_WAIT_3D_IDLE (1 << 15)
|
||||
# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
|
||||
# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
|
||||
# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
|
||||
|
@ -1105,7 +1105,6 @@ do { \
|
|||
write = 0; \
|
||||
_tab += _i; \
|
||||
} \
|
||||
\
|
||||
while (_size > 0) { \
|
||||
*(ring + write) = *_tab++; \
|
||||
write++; \
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* radeon_state.c -- State support for Radeon -*- linux-c -*-
|
||||
*
|
||||
/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
|
||||
/*
|
||||
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
|
@ -72,10 +72,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
|
||||
case RADEON_EMIT_PP_MISC:
|
||||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&data[(RADEON_RB3D_DEPTHOFFSET
|
||||
-
|
||||
RADEON_PP_MISC) /
|
||||
4])) {
|
||||
&data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
|
||||
DRM_ERROR("Invalid depth buffer offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -83,10 +80,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
|
||||
case RADEON_EMIT_PP_CNTL:
|
||||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&data[(RADEON_RB3D_COLOROFFSET
|
||||
-
|
||||
RADEON_PP_CNTL) /
|
||||
4])) {
|
||||
&data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
|
||||
DRM_ERROR("Invalid colour buffer offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -109,10 +103,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
case RADEON_EMIT_PP_TXFILTER_1:
|
||||
case RADEON_EMIT_PP_TXFILTER_2:
|
||||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&data[(RADEON_PP_TXOFFSET_0
|
||||
-
|
||||
RADEON_PP_TXFILTER_0) /
|
||||
4])) {
|
||||
&data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
|
||||
DRM_ERROR("Invalid R100 texture offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -126,8 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
case R200_EMIT_PP_CUBIC_OFFSETS_5:{
|
||||
int i;
|
||||
for (i = 0; i < 5; i++) {
|
||||
if (radeon_check_and_fixup_offset
|
||||
(dev_priv, filp_priv, &data[i])) {
|
||||
if (radeon_check_and_fixup_offset(dev_priv,
|
||||
filp_priv,
|
||||
&data[i])) {
|
||||
DRM_ERROR
|
||||
("Invalid R200 cubic texture offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
|
@ -239,8 +231,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
|
||||
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
||||
dev_priv,
|
||||
drm_file_t * filp_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
drm_file_t *filp_priv,
|
||||
drm_radeon_kcmd_buffer_t *
|
||||
cmdbuf,
|
||||
unsigned int *cmdsz)
|
||||
{
|
||||
u32 *cmd = (u32 *) cmdbuf->buf;
|
||||
|
@ -555,7 +548,8 @@ static struct {
|
|||
{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
|
||||
{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
|
||||
{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
|
||||
{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
|
||||
{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
|
||||
"R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
|
||||
{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
|
||||
{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
|
||||
{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
|
||||
|
@ -569,7 +563,7 @@ static struct {
|
|||
{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
|
||||
"R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
|
||||
{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
|
||||
{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
|
||||
{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
|
||||
{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
|
||||
{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
|
||||
{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
|
||||
|
@ -592,7 +586,7 @@ static struct {
|
|||
{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
|
||||
{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
|
||||
{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
|
||||
{R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
|
||||
{R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
|
||||
{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
|
||||
{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
|
||||
{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
|
||||
|
@ -985,8 +979,8 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
|
|||
* rendering a quad into just those buffers. Thus, we have to
|
||||
* make sure the 3D engine is configured correctly.
|
||||
*/
|
||||
if ((dev_priv->microcode_version == UCODE_R200) &&
|
||||
(flags & (RADEON_DEPTH | RADEON_STENCIL))) {
|
||||
else if ((dev_priv->microcode_version == UCODE_R200) &&
|
||||
(flags & (RADEON_DEPTH | RADEON_STENCIL))) {
|
||||
|
||||
int tempPP_CNTL;
|
||||
int tempRE_CNTL;
|
||||
|
@ -1637,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
|
|||
(u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
|
||||
dwords = size / 4;
|
||||
|
||||
#define RADEON_COPY_MT(_buf, _data, _width) \
|
||||
do { \
|
||||
if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
|
||||
DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
|
||||
return DRM_ERR(EFAULT); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
if (microtile) {
|
||||
/* texture micro tiling in use, minimum texture width is thus 16 bytes.
|
||||
however, we cannot use blitter directly for texture width < 64 bytes,
|
||||
|
@ -1648,46 +1650,19 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
|
|||
from user space. */
|
||||
if (tex->height == 1) {
|
||||
if (tex_width >= 64 || tex_width <= 16) {
|
||||
if (DRM_COPY_FROM_USER(buffer, data,
|
||||
tex_width *
|
||||
sizeof(u32))) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer, data,
|
||||
(int)(tex_width * sizeof(u32)));
|
||||
} else if (tex_width == 32) {
|
||||
if (DRM_COPY_FROM_USER
|
||||
(buffer, data, 16)) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
if (DRM_COPY_FROM_USER
|
||||
(buffer + 8, data + 16, 16)) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer, data, 16);
|
||||
RADEON_COPY_MT(buffer + 8,
|
||||
data + 16, 16);
|
||||
}
|
||||
} else if (tex_width >= 64 || tex_width == 16) {
|
||||
if (DRM_COPY_FROM_USER(buffer, data,
|
||||
dwords * sizeof(u32))) {
|
||||
DRM_ERROR("EFAULT on data, %d dwords\n",
|
||||
dwords);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer, data,
|
||||
(int)(dwords * sizeof(u32)));
|
||||
} else if (tex_width < 16) {
|
||||
for (i = 0; i < tex->height; i++) {
|
||||
if (DRM_COPY_FROM_USER
|
||||
(buffer, data, tex_width)) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer, data, tex_width);
|
||||
buffer += 4;
|
||||
data += tex_width;
|
||||
}
|
||||
|
@ -1695,37 +1670,13 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
|
|||
/* TODO: make sure this works when not fitting in one buffer
|
||||
(i.e. 32bytes x 2048...) */
|
||||
for (i = 0; i < tex->height; i += 2) {
|
||||
if (DRM_COPY_FROM_USER
|
||||
(buffer, data, 16)) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer, data, 16);
|
||||
data += 16;
|
||||
if (DRM_COPY_FROM_USER
|
||||
(buffer + 8, data, 16)) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer + 8, data, 16);
|
||||
data += 16;
|
||||
if (DRM_COPY_FROM_USER
|
||||
(buffer + 4, data, 16)) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer + 4, data, 16);
|
||||
data += 16;
|
||||
if (DRM_COPY_FROM_USER
|
||||
(buffer + 12, data, 16)) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer + 12, data, 16);
|
||||
data += 16;
|
||||
buffer += 16;
|
||||
}
|
||||
|
@ -1735,31 +1686,22 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
|
|||
/* Texture image width is larger than the minimum, so we
|
||||
* can upload it directly.
|
||||
*/
|
||||
if (DRM_COPY_FROM_USER(buffer, data,
|
||||
dwords * sizeof(u32))) {
|
||||
DRM_ERROR("EFAULT on data, %d dwords\n",
|
||||
dwords);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer, data,
|
||||
(int)(dwords * sizeof(u32)));
|
||||
} else {
|
||||
/* Texture image width is less than the minimum, so we
|
||||
* need to pad out each image scanline to the minimum
|
||||
* width.
|
||||
*/
|
||||
for (i = 0; i < tex->height; i++) {
|
||||
if (DRM_COPY_FROM_USER
|
||||
(buffer, data, tex_width)) {
|
||||
DRM_ERROR
|
||||
("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
RADEON_COPY_MT(buffer, data, tex_width);
|
||||
buffer += 8;
|
||||
data += tex_width;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#undef RADEON_COPY_MT
|
||||
buf->filp = filp;
|
||||
buf->used = size;
|
||||
offset = dev_priv->gart_buffers_offset + buf->offset;
|
||||
|
@ -1821,7 +1763,7 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
|
|||
}
|
||||
|
||||
static void radeon_apply_surface_regs(int surf_index,
|
||||
drm_radeon_private_t * dev_priv)
|
||||
drm_radeon_private_t *dev_priv)
|
||||
{
|
||||
if (!dev_priv->mmio)
|
||||
return;
|
||||
|
@ -1847,8 +1789,8 @@ static void radeon_apply_surface_regs(int surf_index,
|
|||
* freed, we suddenly need two surfaces to store A and C, which might
|
||||
* not always be available.
|
||||
*/
|
||||
static int alloc_surface(drm_radeon_surface_alloc_t * new,
|
||||
drm_radeon_private_t * dev_priv, DRMFILE filp)
|
||||
static int alloc_surface(drm_radeon_surface_alloc_t *new,
|
||||
drm_radeon_private_t *dev_priv, DRMFILE filp)
|
||||
{
|
||||
struct radeon_virt_surface *s;
|
||||
int i;
|
||||
|
@ -2158,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
|
||||
|
@ -2596,9 +2543,9 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
|
||||
static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_cmd_header_t header,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf)
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
int sz = header.scalars.count;
|
||||
int start = header.scalars.offset;
|
||||
|
@ -2618,9 +2565,9 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
|
|||
|
||||
/* God this is ugly
|
||||
*/
|
||||
static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
|
||||
static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_cmd_header_t header,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf)
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
int sz = header.scalars.count;
|
||||
int start = ((unsigned int)header.scalars.offset) + 0x100;
|
||||
|
@ -2638,9 +2585,9 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv,
|
||||
static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_cmd_header_t header,
|
||||
drm_radeon_kcmd_buffer_t * cmdbuf)
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
int sz = header.vectors.count;
|
||||
int start = header.vectors.offset;
|
||||
|
@ -2685,8 +2632,8 @@ static int radeon_emit_packet3(drm_device_t * dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int radeon_emit_packet3_cliprect(drm_device_t * dev,
|
||||
drm_file_t * filp_priv,
|
||||
static int radeon_emit_packet3_cliprect(drm_device_t *dev,
|
||||
drm_file_t *filp_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
int orig_nbox)
|
||||
{
|
||||
|
@ -2818,7 +2765,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
|
|||
kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
|
||||
if (kbuf == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) {
|
||||
if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
|
||||
cmdbuf.bufsz)) {
|
||||
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
|
@ -2981,7 +2929,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
|
|||
value = dev_priv->gart_vm_start;
|
||||
break;
|
||||
case RADEON_PARAM_REGISTER_HANDLE:
|
||||
value = dev_priv->mmio_offset;
|
||||
value = dev_priv->mmio->offset;
|
||||
break;
|
||||
case RADEON_PARAM_STATUS_HANDLE:
|
||||
value = dev_priv->ring_rptr_offset;
|
||||
|
@ -3004,6 +2952,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
|
|||
case RADEON_PARAM_GART_TEX_HANDLE:
|
||||
value = dev_priv->gart_textures_offset;
|
||||
break;
|
||||
|
||||
case RADEON_PARAM_CARD_TYPE:
|
||||
if (dev_priv->flags & CHIP_IS_PCIE)
|
||||
value = RADEON_CARD_PCIE;
|
||||
else if (dev_priv->flags & CHIP_IS_AGP)
|
||||
value = RADEON_CARD_AGP;
|
||||
else
|
||||
value = RADEON_CARD_PCI;
|
||||
break;
|
||||
default:
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -3066,10 +3023,11 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
|
|||
/* When a client dies:
|
||||
* - Check for and clean up flipped page state
|
||||
* - Free any alloced GART memory.
|
||||
* - Free any alloced radeon surfaces.
|
||||
*
|
||||
* DRM infrastructure takes care of reclaiming dma buffers.
|
||||
*/
|
||||
void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
||||
void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
|
||||
{
|
||||
if (dev->dev_private) {
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -3082,16 +3040,17 @@ void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp)
|
|||
}
|
||||
}
|
||||
|
||||
void radeon_driver_pretakedown(drm_device_t * dev)
|
||||
void radeon_driver_lastclose(drm_device_t * dev)
|
||||
{
|
||||
radeon_do_release(dev);
|
||||
}
|
||||
|
||||
int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
|
||||
int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
struct drm_radeon_driver_file_fields *radeon_priv;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
radeon_priv =
|
||||
(struct drm_radeon_driver_file_fields *)
|
||||
drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
|
||||
|
@ -3100,6 +3059,7 @@ int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
|
|||
return -ENOMEM;
|
||||
|
||||
filp_priv->driver_priv = radeon_priv;
|
||||
|
||||
if (dev_priv)
|
||||
radeon_priv->radeon_fb_delta = dev_priv->fb_location;
|
||||
else
|
||||
|
@ -3107,7 +3067,7 @@ int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv)
|
||||
void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv)
|
||||
{
|
||||
struct drm_radeon_driver_file_fields *radeon_priv =
|
||||
filp_priv->driver_priv;
|
||||
|
@ -3116,33 +3076,33 @@ void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv)
|
|||
}
|
||||
|
||||
drm_ioctl_desc_t radeon_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, 1, 0}
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH}
|
||||
};
|
||||
|
||||
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
|
||||
|
|
|
@ -533,23 +533,9 @@ static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
|
|||
dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initalize mappings. On Savage4 and SavageIX the alignment
|
||||
* and size of the aperture is not suitable for automatic MTRR setup
|
||||
* in drm_addmap. Therefore we do it manually before the maps are
|
||||
* initialized. We also need to take care of deleting the MTRRs in
|
||||
* postcleanup.
|
||||
*/
|
||||
int savage_preinit(drm_device_t * dev, unsigned long chipset)
|
||||
int savage_driver_load(drm_device_t *dev, unsigned long chipset)
|
||||
{
|
||||
drm_savage_private_t *dev_priv;
|
||||
unsigned long mmio_base, fb_base, fb_size, aperture_base;
|
||||
/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
|
||||
* in case we decide we need information on the BAR for BSD in the
|
||||
* future.
|
||||
*/
|
||||
unsigned int fb_rsrc, aper_rsrc;
|
||||
int ret = 0;
|
||||
|
||||
dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv == NULL)
|
||||
|
@ -557,8 +543,30 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
|
|||
|
||||
memset(dev_priv, 0, sizeof(drm_savage_private_t));
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
||||
dev_priv->chipset = (enum savage_family)chipset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Initalize mappings. On Savage4 and SavageIX the alignment
|
||||
* and size of the aperture is not suitable for automatic MTRR setup
|
||||
* in drm_addmap. Therefore we add them manually before the maps are
|
||||
* initialized, and tear them down on last close.
|
||||
*/
|
||||
int savage_driver_firstopen(drm_device_t *dev)
|
||||
{
|
||||
drm_savage_private_t *dev_priv = dev->dev_private;
|
||||
unsigned long mmio_base, fb_base, fb_size, aperture_base;
|
||||
/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
|
||||
* in case we decide we need information on the BAR for BSD in the
|
||||
* future.
|
||||
*/
|
||||
unsigned int fb_rsrc, aper_rsrc;
|
||||
int ret = 0;
|
||||
|
||||
dev_priv->mtrr[0].handle = -1;
|
||||
dev_priv->mtrr[1].handle = -1;
|
||||
dev_priv->mtrr[2].handle = -1;
|
||||
|
@ -576,26 +584,24 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
|
|||
dev_priv->mtrr[0].base = fb_base;
|
||||
dev_priv->mtrr[0].size = 0x01000000;
|
||||
dev_priv->mtrr[0].handle =
|
||||
mtrr_add(dev_priv->mtrr[0].base,
|
||||
dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB,
|
||||
1);
|
||||
drm_mtrr_add(dev_priv->mtrr[0].base,
|
||||
dev_priv->mtrr[0].size, DRM_MTRR_WC);
|
||||
dev_priv->mtrr[1].base = fb_base + 0x02000000;
|
||||
dev_priv->mtrr[1].size = 0x02000000;
|
||||
dev_priv->mtrr[1].handle =
|
||||
mtrr_add(dev_priv->mtrr[1].base,
|
||||
dev_priv->mtrr[1].size, MTRR_TYPE_WRCOMB,
|
||||
1);
|
||||
drm_mtrr_add(dev_priv->mtrr[1].base,
|
||||
dev_priv->mtrr[1].size, DRM_MTRR_WC);
|
||||
dev_priv->mtrr[2].base = fb_base + 0x04000000;
|
||||
dev_priv->mtrr[2].size = 0x04000000;
|
||||
dev_priv->mtrr[2].handle =
|
||||
mtrr_add(dev_priv->mtrr[2].base,
|
||||
dev_priv->mtrr[2].size, MTRR_TYPE_WRCOMB,
|
||||
1);
|
||||
drm_mtrr_add(dev_priv->mtrr[2].base,
|
||||
dev_priv->mtrr[2].size, DRM_MTRR_WC);
|
||||
} else {
|
||||
DRM_ERROR("strange pci_resource_len %08lx\n",
|
||||
drm_get_resource_len(dev, 0));
|
||||
}
|
||||
} else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) {
|
||||
} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
|
||||
dev_priv->chipset != S3_SAVAGE2000) {
|
||||
mmio_base = drm_get_resource_start(dev, 0);
|
||||
fb_rsrc = 1;
|
||||
fb_base = drm_get_resource_start(dev, 1);
|
||||
|
@ -609,9 +615,8 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
|
|||
dev_priv->mtrr[0].base = fb_base;
|
||||
dev_priv->mtrr[0].size = 0x08000000;
|
||||
dev_priv->mtrr[0].handle =
|
||||
mtrr_add(dev_priv->mtrr[0].base,
|
||||
dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB,
|
||||
1);
|
||||
drm_mtrr_add(dev_priv->mtrr[0].base,
|
||||
dev_priv->mtrr[0].size, DRM_MTRR_WC);
|
||||
} else {
|
||||
DRM_ERROR("strange pci_resource_len %08lx\n",
|
||||
drm_get_resource_len(dev, 1));
|
||||
|
@ -648,16 +653,21 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
|
|||
/*
|
||||
* Delete MTRRs and free device-private data.
|
||||
*/
|
||||
int savage_postcleanup(drm_device_t * dev)
|
||||
void savage_driver_lastclose(drm_device_t *dev)
|
||||
{
|
||||
drm_savage_private_t *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 3; ++i)
|
||||
if (dev_priv->mtrr[i].handle >= 0)
|
||||
mtrr_del(dev_priv->mtrr[i].handle,
|
||||
drm_mtrr_del(dev_priv->mtrr[i].handle,
|
||||
dev_priv->mtrr[i].base,
|
||||
dev_priv->mtrr[i].size);
|
||||
dev_priv->mtrr[i].size, DRM_MTRR_WC);
|
||||
}
|
||||
|
||||
int savage_driver_unload(drm_device_t *dev)
|
||||
{
|
||||
drm_savage_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
|
||||
|
||||
|
@ -994,8 +1004,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
|
|||
* DMA buffer management
|
||||
*/
|
||||
|
||||
static int savage_bci_get_buffers(DRMFILE filp, drm_device_t * dev,
|
||||
drm_dma_t * d)
|
||||
static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
|
||||
{
|
||||
drm_buf_t *buf;
|
||||
int i;
|
||||
|
@ -1057,7 +1066,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
|
|||
return ret;
|
||||
}
|
||||
|
||||
void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp)
|
||||
void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
|
||||
{
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_savage_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -1090,10 +1099,10 @@ void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp)
|
|||
}
|
||||
|
||||
drm_ioctl_desc_t savage_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH},
|
||||
};
|
||||
|
||||
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
|
||||
|
|
|
@ -30,31 +30,6 @@
|
|||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
savage_PCI_IDS
|
||||
};
|
||||
|
@ -63,13 +38,13 @@ static struct drm_driver driver = {
|
|||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
|
||||
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
|
||||
.preinit = savage_preinit,
|
||||
.postinit = postinit,
|
||||
.postcleanup = savage_postcleanup,
|
||||
.load = savage_driver_load,
|
||||
.firstopen = savage_driver_firstopen,
|
||||
.lastclose = savage_driver_lastclose,
|
||||
.unload = savage_driver_unload,
|
||||
.reclaim_buffers = savage_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.version = version,
|
||||
.ioctls = savage_ioctls,
|
||||
.dma_ioctl = savage_bci_buffers,
|
||||
.fops = {
|
||||
|
@ -80,12 +55,19 @@ static struct drm_driver driver = {
|
|||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
}
|
||||
,
|
||||
},
|
||||
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init savage_init(void)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* savage_drv.h -- Private header for the savage driver
|
||||
*
|
||||
/* savage_drv.h -- Private header for the savage driver */
|
||||
/*
|
||||
* Copyright 2004 Felix Kuehling
|
||||
* All Rights Reserved.
|
||||
*
|
||||
|
@ -192,7 +192,7 @@ typedef struct drm_savage_private {
|
|||
/* Err, there is a macro wait_event in include/linux/wait.h.
|
||||
* Avoid unwanted macro expansion. */
|
||||
void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
|
||||
drm_clip_rect_t * pbox);
|
||||
const drm_clip_rect_t * pbox);
|
||||
void (*dma_flush) (struct drm_savage_private * dev_priv);
|
||||
} drm_savage_private_t;
|
||||
|
||||
|
@ -208,16 +208,18 @@ extern void savage_dma_reset(drm_savage_private_t * dev_priv);
|
|||
extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
|
||||
extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
|
||||
unsigned int n);
|
||||
extern int savage_preinit(drm_device_t * dev, unsigned long chipset);
|
||||
extern int savage_postcleanup(drm_device_t * dev);
|
||||
extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
|
||||
extern int savage_driver_firstopen(drm_device_t *dev);
|
||||
extern void savage_driver_lastclose(drm_device_t *dev);
|
||||
extern int savage_driver_unload(drm_device_t *dev);
|
||||
extern int savage_do_cleanup_bci(drm_device_t * dev);
|
||||
extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp);
|
||||
|
||||
/* state functions */
|
||||
extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
|
||||
drm_clip_rect_t * pbox);
|
||||
const drm_clip_rect_t * pbox);
|
||||
extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
|
||||
drm_clip_rect_t * pbox);
|
||||
const drm_clip_rect_t * pbox);
|
||||
|
||||
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
|
||||
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
|
||||
|
@ -500,15 +502,6 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
|
|||
|
||||
#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
|
||||
|
||||
#define BCI_COPY_FROM_USER(src,n) do { \
|
||||
unsigned int i; \
|
||||
for (i = 0; i < n; ++i) { \
|
||||
uint32_t val; \
|
||||
DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \
|
||||
BCI_WRITE(val); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
/*
|
||||
* command DMA support
|
||||
*/
|
||||
|
@ -534,8 +527,8 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
|
|||
|
||||
#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
|
||||
|
||||
#define DMA_COPY_FROM_USER(src,n) do { \
|
||||
DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \
|
||||
#define DMA_COPY(src, n) do { \
|
||||
memcpy(dma_ptr, (src), (n)*4); \
|
||||
dma_ptr += n; \
|
||||
} while(0)
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
#include "savage_drv.h"
|
||||
|
||||
void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
|
||||
drm_clip_rect_t * pbox)
|
||||
const drm_clip_rect_t * pbox)
|
||||
{
|
||||
uint32_t scstart = dev_priv->state.s3d.new_scstart;
|
||||
uint32_t scend = dev_priv->state.s3d.new_scend;
|
||||
|
@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
|
|||
}
|
||||
|
||||
void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
|
||||
drm_clip_rect_t * pbox)
|
||||
const drm_clip_rect_t * pbox)
|
||||
{
|
||||
uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
|
||||
uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
|
||||
|
@ -115,18 +115,19 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
|
|||
|
||||
#define SAVE_STATE(reg,where) \
|
||||
if(start <= reg && start+count > reg) \
|
||||
DRM_GET_USER_UNCHECKED(dev_priv->state.where, ®s[reg-start])
|
||||
dev_priv->state.where = regs[reg - start]
|
||||
#define SAVE_STATE_MASK(reg,where,mask) do { \
|
||||
if(start <= reg && start+count > reg) { \
|
||||
uint32_t tmp; \
|
||||
DRM_GET_USER_UNCHECKED(tmp, ®s[reg-start]); \
|
||||
tmp = regs[reg - start]; \
|
||||
dev_priv->state.where = (tmp & (mask)) | \
|
||||
(dev_priv->state.where & ~(mask)); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
|
||||
unsigned int start, unsigned int count,
|
||||
const uint32_t __user * regs)
|
||||
const uint32_t *regs)
|
||||
{
|
||||
if (start < SAVAGE_TEXPALADDR_S3D ||
|
||||
start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
|
||||
|
@ -148,8 +149,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
|
|||
SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
|
||||
if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
|
||||
return savage_verify_texaddr(dev_priv, 0,
|
||||
dev_priv->state.s3d.
|
||||
texaddr);
|
||||
dev_priv->state.s3d.texaddr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -157,7 +157,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
|
|||
|
||||
static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
|
||||
unsigned int start, unsigned int count,
|
||||
const uint32_t __user * regs)
|
||||
const uint32_t *regs)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
|
@ -174,19 +174,18 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
|
|||
~SAVAGE_SCISSOR_MASK_S4);
|
||||
|
||||
/* if any texture regs were changed ... */
|
||||
if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) {
|
||||
if (start <= SAVAGE_TEXDESCR_S4 &&
|
||||
start + count > SAVAGE_TEXPALADDR_S4) {
|
||||
/* ... check texture state */
|
||||
SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
|
||||
SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
|
||||
SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
|
||||
if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
|
||||
ret |=
|
||||
savage_verify_texaddr(dev_priv, 0,
|
||||
dev_priv->state.s4.texaddr0);
|
||||
ret |= savage_verify_texaddr(dev_priv, 0,
|
||||
dev_priv->state.s4.texaddr0);
|
||||
if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
|
||||
ret |=
|
||||
savage_verify_texaddr(dev_priv, 1,
|
||||
dev_priv->state.s4.texaddr1);
|
||||
ret |= savage_verify_texaddr(dev_priv, 1,
|
||||
dev_priv->state.s4.texaddr1);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -197,7 +196,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
|
|||
|
||||
static int savage_dispatch_state(drm_savage_private_t * dev_priv,
|
||||
const drm_savage_cmd_header_t * cmd_header,
|
||||
const uint32_t __user * regs)
|
||||
const uint32_t *regs)
|
||||
{
|
||||
unsigned int count = cmd_header->state.count;
|
||||
unsigned int start = cmd_header->state.start;
|
||||
|
@ -209,9 +208,6 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
|
|||
if (!count)
|
||||
return 0;
|
||||
|
||||
if (DRM_VERIFYAREA_READ(regs, count * 4))
|
||||
return DRM_ERR(EFAULT);
|
||||
|
||||
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
|
||||
ret = savage_verify_state_s3d(dev_priv, start, count, regs);
|
||||
if (ret != 0)
|
||||
|
@ -236,8 +232,8 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
|
|||
/* scissor regs are emitted in savage_dispatch_draw */
|
||||
if (start < SAVAGE_DRAWCTRL0_S4) {
|
||||
if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
|
||||
count2 =
|
||||
count - (SAVAGE_DRAWCTRL1_S4 + 1 - start);
|
||||
count2 = count -
|
||||
(SAVAGE_DRAWCTRL1_S4 + 1 - start);
|
||||
if (start + count > SAVAGE_DRAWCTRL0_S4)
|
||||
count = SAVAGE_DRAWCTRL0_S4 - start;
|
||||
} else if (start <= SAVAGE_DRAWCTRL1_S4) {
|
||||
|
@ -263,7 +259,7 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
|
|||
while (count > 0) {
|
||||
unsigned int n = count < 255 ? count : 255;
|
||||
DMA_SET_REGISTERS(start, n);
|
||||
DMA_COPY_FROM_USER(regs, n);
|
||||
DMA_COPY(regs, n);
|
||||
count -= n;
|
||||
start += n;
|
||||
regs += n;
|
||||
|
@ -421,8 +417,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
|
|||
|
||||
static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
|
||||
const drm_savage_cmd_header_t * cmd_header,
|
||||
const uint32_t __user * vtxbuf,
|
||||
unsigned int vb_size, unsigned int vb_stride)
|
||||
const uint32_t *vtxbuf, unsigned int vb_size,
|
||||
unsigned int vb_stride)
|
||||
{
|
||||
unsigned char reorder = 0;
|
||||
unsigned int prim = cmd_header->prim.prim;
|
||||
|
@ -507,8 +503,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
|
|||
|
||||
for (i = start; i < start + count; ++i) {
|
||||
unsigned int j = i + reorder[i % 3];
|
||||
DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
|
||||
vtx_size);
|
||||
DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
|
||||
}
|
||||
|
||||
DMA_COMMIT();
|
||||
|
@ -517,13 +512,12 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
|
|||
DMA_DRAW_PRIMITIVE(count, prim, skip);
|
||||
|
||||
if (vb_stride == vtx_size) {
|
||||
DMA_COPY_FROM_USER(&vtxbuf[vb_stride * start],
|
||||
vtx_size * count);
|
||||
DMA_COPY(&vtxbuf[vb_stride * start],
|
||||
vtx_size * count);
|
||||
} else {
|
||||
for (i = start; i < start + count; ++i) {
|
||||
DMA_COPY_FROM_USER(&vtxbuf
|
||||
[vb_stride * i],
|
||||
vtx_size);
|
||||
DMA_COPY(&vtxbuf [vb_stride * i],
|
||||
vtx_size);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -541,7 +535,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
|
|||
|
||||
static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
|
||||
const drm_savage_cmd_header_t * cmd_header,
|
||||
const uint16_t __user * usr_idx,
|
||||
const uint16_t *idx,
|
||||
const drm_buf_t * dmabuf)
|
||||
{
|
||||
unsigned char reorder = 0;
|
||||
|
@ -628,11 +622,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
|
|||
while (n != 0) {
|
||||
/* Can emit up to 255 indices (85 triangles) at once. */
|
||||
unsigned int count = n > 255 ? 255 : n;
|
||||
/* Is it ok to allocate 510 bytes on the stack in an ioctl? */
|
||||
uint16_t idx[255];
|
||||
|
||||
/* Copy and check indices */
|
||||
DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
|
||||
/* check indices */
|
||||
for (i = 0; i < count; ++i) {
|
||||
if (idx[i] > dmabuf->total / 32) {
|
||||
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
|
||||
|
@ -652,8 +643,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
|
|||
|
||||
for (i = 1; i + 1 < count; i += 2)
|
||||
BCI_WRITE(idx[i + reorder[i % 3]] |
|
||||
(idx[i + 1 + reorder[(i + 1) % 3]] <<
|
||||
16));
|
||||
(idx[i + 1 +
|
||||
reorder[(i + 1) % 3]] << 16));
|
||||
if (i < count)
|
||||
BCI_WRITE(idx[i + reorder[i % 3]]);
|
||||
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
|
||||
|
@ -674,7 +665,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
|
|||
BCI_WRITE(idx[i]);
|
||||
}
|
||||
|
||||
usr_idx += count;
|
||||
idx += count;
|
||||
n -= count;
|
||||
|
||||
prim |= BCI_CMD_DRAW_CONT;
|
||||
|
@ -685,8 +676,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
|
|||
|
||||
static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
|
||||
const drm_savage_cmd_header_t * cmd_header,
|
||||
const uint16_t __user * usr_idx,
|
||||
const uint32_t __user * vtxbuf,
|
||||
const uint16_t *idx,
|
||||
const uint32_t *vtxbuf,
|
||||
unsigned int vb_size, unsigned int vb_stride)
|
||||
{
|
||||
unsigned char reorder = 0;
|
||||
|
@ -751,11 +742,8 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
|
|||
while (n != 0) {
|
||||
/* Can emit up to 255 vertices (85 triangles) at once. */
|
||||
unsigned int count = n > 255 ? 255 : n;
|
||||
/* Is it ok to allocate 510 bytes on the stack in an ioctl? */
|
||||
uint16_t idx[255];
|
||||
|
||||
/* Copy and check indices */
|
||||
DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
|
||||
|
||||
/* Check indices */
|
||||
for (i = 0; i < count; ++i) {
|
||||
if (idx[i] > vb_size / (vb_stride * 4)) {
|
||||
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
|
||||
|
@ -775,8 +763,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
|
|||
|
||||
for (i = 0; i < count; ++i) {
|
||||
unsigned int j = idx[i + reorder[i % 3]];
|
||||
DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
|
||||
vtx_size);
|
||||
DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
|
||||
}
|
||||
|
||||
DMA_COMMIT();
|
||||
|
@ -786,14 +773,13 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
|
|||
|
||||
for (i = 0; i < count; ++i) {
|
||||
unsigned int j = idx[i];
|
||||
DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
|
||||
vtx_size);
|
||||
DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
|
||||
}
|
||||
|
||||
DMA_COMMIT();
|
||||
}
|
||||
|
||||
usr_idx += count;
|
||||
idx += count;
|
||||
n -= count;
|
||||
|
||||
prim |= BCI_CMD_DRAW_CONT;
|
||||
|
@ -804,11 +790,11 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
|
|||
|
||||
static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
|
||||
const drm_savage_cmd_header_t * cmd_header,
|
||||
const drm_savage_cmd_header_t __user * data,
|
||||
const drm_savage_cmd_header_t *data,
|
||||
unsigned int nbox,
|
||||
const drm_clip_rect_t __user * usr_boxes)
|
||||
const drm_clip_rect_t *boxes)
|
||||
{
|
||||
unsigned int flags = cmd_header->clear0.flags, mask, value;
|
||||
unsigned int flags = cmd_header->clear0.flags;
|
||||
unsigned int clear_cmd;
|
||||
unsigned int i, nbufs;
|
||||
DMA_LOCALS;
|
||||
|
@ -816,9 +802,6 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
|
|||
if (nbox == 0)
|
||||
return 0;
|
||||
|
||||
DRM_GET_USER_UNCHECKED(mask, &data->clear1.mask);
|
||||
DRM_GET_USER_UNCHECKED(value, &data->clear1.value);
|
||||
|
||||
clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
|
||||
BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
|
||||
BCI_CMD_SET_ROP(clear_cmd, 0xCC);
|
||||
|
@ -828,21 +811,19 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
|
|||
if (nbufs == 0)
|
||||
return 0;
|
||||
|
||||
if (mask != 0xffffffff) {
|
||||
if (data->clear1.mask != 0xffffffff) {
|
||||
/* set mask */
|
||||
BEGIN_DMA(2);
|
||||
DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
|
||||
DMA_WRITE(mask);
|
||||
DMA_WRITE(data->clear1.mask);
|
||||
DMA_COMMIT();
|
||||
}
|
||||
for (i = 0; i < nbox; ++i) {
|
||||
drm_clip_rect_t box;
|
||||
unsigned int x, y, w, h;
|
||||
unsigned int buf;
|
||||
DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
|
||||
x = box.x1, y = box.y1;
|
||||
w = box.x2 - box.x1;
|
||||
h = box.y2 - box.y1;
|
||||
x = boxes[i].x1, y = boxes[i].y1;
|
||||
w = boxes[i].x2 - boxes[i].x1;
|
||||
h = boxes[i].y2 - boxes[i].y1;
|
||||
BEGIN_DMA(nbufs * 6);
|
||||
for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
|
||||
if (!(flags & buf))
|
||||
|
@ -862,13 +843,13 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
|
|||
DMA_WRITE(dev_priv->depth_bd);
|
||||
break;
|
||||
}
|
||||
DMA_WRITE(value);
|
||||
DMA_WRITE(data->clear1.value);
|
||||
DMA_WRITE(BCI_X_Y(x, y));
|
||||
DMA_WRITE(BCI_W_H(w, h));
|
||||
}
|
||||
DMA_COMMIT();
|
||||
}
|
||||
if (mask != 0xffffffff) {
|
||||
if (data->clear1.mask != 0xffffffff) {
|
||||
/* reset mask */
|
||||
BEGIN_DMA(2);
|
||||
DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
|
||||
|
@ -880,8 +861,7 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
|
|||
}
|
||||
|
||||
static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
|
||||
unsigned int nbox,
|
||||
const drm_clip_rect_t __user * usr_boxes)
|
||||
unsigned int nbox, const drm_clip_rect_t *boxes)
|
||||
{
|
||||
unsigned int swap_cmd;
|
||||
unsigned int i;
|
||||
|
@ -895,16 +875,14 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
|
|||
BCI_CMD_SET_ROP(swap_cmd, 0xCC);
|
||||
|
||||
for (i = 0; i < nbox; ++i) {
|
||||
drm_clip_rect_t box;
|
||||
DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
|
||||
|
||||
BEGIN_DMA(6);
|
||||
DMA_WRITE(swap_cmd);
|
||||
DMA_WRITE(dev_priv->back_offset);
|
||||
DMA_WRITE(dev_priv->back_bd);
|
||||
DMA_WRITE(BCI_X_Y(box.x1, box.y1));
|
||||
DMA_WRITE(BCI_X_Y(box.x1, box.y1));
|
||||
DMA_WRITE(BCI_W_H(box.x2 - box.x1, box.y2 - box.y1));
|
||||
DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
|
||||
DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
|
||||
DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
|
||||
boxes[i].y2 - boxes[i].y1));
|
||||
DMA_COMMIT();
|
||||
}
|
||||
|
||||
|
@ -912,68 +890,52 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
|
|||
}
|
||||
|
||||
static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
|
||||
const drm_savage_cmd_header_t __user * start,
|
||||
const drm_savage_cmd_header_t __user * end,
|
||||
const drm_savage_cmd_header_t *start,
|
||||
const drm_savage_cmd_header_t *end,
|
||||
const drm_buf_t * dmabuf,
|
||||
const unsigned int __user * usr_vtxbuf,
|
||||
const unsigned int *vtxbuf,
|
||||
unsigned int vb_size, unsigned int vb_stride,
|
||||
unsigned int nbox,
|
||||
const drm_clip_rect_t __user * usr_boxes)
|
||||
const drm_clip_rect_t *boxes)
|
||||
{
|
||||
unsigned int i, j;
|
||||
int ret;
|
||||
|
||||
for (i = 0; i < nbox; ++i) {
|
||||
drm_clip_rect_t box;
|
||||
const drm_savage_cmd_header_t __user *usr_cmdbuf;
|
||||
DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
|
||||
dev_priv->emit_clip_rect(dev_priv, &box);
|
||||
const drm_savage_cmd_header_t *cmdbuf;
|
||||
dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
|
||||
|
||||
usr_cmdbuf = start;
|
||||
while (usr_cmdbuf < end) {
|
||||
cmdbuf = start;
|
||||
while (cmdbuf < end) {
|
||||
drm_savage_cmd_header_t cmd_header;
|
||||
DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
|
||||
sizeof(cmd_header));
|
||||
usr_cmdbuf++;
|
||||
cmd_header = *cmdbuf;
|
||||
cmdbuf++;
|
||||
switch (cmd_header.cmd.cmd) {
|
||||
case SAVAGE_CMD_DMA_PRIM:
|
||||
ret =
|
||||
savage_dispatch_dma_prim(dev_priv,
|
||||
&cmd_header,
|
||||
dmabuf);
|
||||
ret = savage_dispatch_dma_prim(
|
||||
dev_priv, &cmd_header, dmabuf);
|
||||
break;
|
||||
case SAVAGE_CMD_VB_PRIM:
|
||||
ret =
|
||||
savage_dispatch_vb_prim(dev_priv,
|
||||
&cmd_header,
|
||||
(const uint32_t
|
||||
__user *)
|
||||
usr_vtxbuf, vb_size,
|
||||
vb_stride);
|
||||
ret = savage_dispatch_vb_prim(
|
||||
dev_priv, &cmd_header,
|
||||
vtxbuf, vb_size, vb_stride);
|
||||
break;
|
||||
case SAVAGE_CMD_DMA_IDX:
|
||||
j = (cmd_header.idx.count + 3) / 4;
|
||||
/* j was check in savage_bci_cmdbuf */
|
||||
ret =
|
||||
savage_dispatch_dma_idx(dev_priv,
|
||||
&cmd_header,
|
||||
(const uint16_t
|
||||
__user *)
|
||||
usr_cmdbuf, dmabuf);
|
||||
usr_cmdbuf += j;
|
||||
ret = savage_dispatch_dma_idx(dev_priv,
|
||||
&cmd_header, (const uint16_t *)cmdbuf,
|
||||
dmabuf);
|
||||
cmdbuf += j;
|
||||
break;
|
||||
case SAVAGE_CMD_VB_IDX:
|
||||
j = (cmd_header.idx.count + 3) / 4;
|
||||
/* j was check in savage_bci_cmdbuf */
|
||||
ret =
|
||||
savage_dispatch_vb_idx(dev_priv,
|
||||
&cmd_header,
|
||||
(const uint16_t
|
||||
__user *)usr_cmdbuf,
|
||||
(const uint32_t
|
||||
__user *)usr_vtxbuf,
|
||||
vb_size, vb_stride);
|
||||
usr_cmdbuf += j;
|
||||
ret = savage_dispatch_vb_idx(dev_priv,
|
||||
&cmd_header, (const uint16_t *)cmdbuf,
|
||||
(const uint32_t *)vtxbuf, vb_size,
|
||||
vb_stride);
|
||||
cmdbuf += j;
|
||||
break;
|
||||
default:
|
||||
/* What's the best return code? EFAULT? */
|
||||
|
@ -998,10 +960,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
|
|||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_t *dmabuf;
|
||||
drm_savage_cmdbuf_t cmdbuf;
|
||||
drm_savage_cmd_header_t __user *usr_cmdbuf;
|
||||
drm_savage_cmd_header_t __user *first_draw_cmd;
|
||||
unsigned int __user *usr_vtxbuf;
|
||||
drm_clip_rect_t __user *usr_boxes;
|
||||
drm_savage_cmd_header_t *kcmd_addr = NULL;
|
||||
drm_savage_cmd_header_t *first_draw_cmd;
|
||||
unsigned int *kvb_addr = NULL;
|
||||
drm_clip_rect_t *kbox_addr = NULL;
|
||||
unsigned int i, j;
|
||||
int ret = 0;
|
||||
|
||||
|
@ -1024,15 +986,53 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
|
|||
dmabuf = NULL;
|
||||
}
|
||||
|
||||
usr_cmdbuf = (drm_savage_cmd_header_t __user *) cmdbuf.cmd_addr;
|
||||
usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
|
||||
usr_boxes = (drm_clip_rect_t __user *) cmdbuf.box_addr;
|
||||
if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size * 8)) ||
|
||||
(cmdbuf.vb_size && DRM_VERIFYAREA_READ(usr_vtxbuf, cmdbuf.vb_size))
|
||||
|| (cmdbuf.nbox
|
||||
&& DRM_VERIFYAREA_READ(usr_boxes,
|
||||
cmdbuf.nbox * sizeof(drm_clip_rect_t))))
|
||||
return DRM_ERR(EFAULT);
|
||||
/* Copy the user buffers into kernel temporary areas. This hasn't been
|
||||
* a performance loss compared to VERIFYAREA_READ/
|
||||
* COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
|
||||
* for locking on FreeBSD.
|
||||
*/
|
||||
if (cmdbuf.size) {
|
||||
kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER);
|
||||
if (kcmd_addr == NULL)
|
||||
return ENOMEM;
|
||||
|
||||
if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr,
|
||||
cmdbuf.size * 8))
|
||||
{
|
||||
drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
cmdbuf.cmd_addr = kcmd_addr;
|
||||
}
|
||||
if (cmdbuf.vb_size) {
|
||||
kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER);
|
||||
if (kvb_addr == NULL) {
|
||||
ret = DRM_ERR(ENOMEM);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr,
|
||||
cmdbuf.vb_size)) {
|
||||
ret = DRM_ERR(EFAULT);
|
||||
goto done;
|
||||
}
|
||||
cmdbuf.vb_addr = kvb_addr;
|
||||
}
|
||||
if (cmdbuf.nbox) {
|
||||
kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
|
||||
DRM_MEM_DRIVER);
|
||||
if (kbox_addr == NULL) {
|
||||
ret = DRM_ERR(ENOMEM);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
|
||||
cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
|
||||
ret = DRM_ERR(EFAULT);
|
||||
goto done;
|
||||
}
|
||||
cmdbuf.box_addr = kbox_addr;
|
||||
}
|
||||
|
||||
/* Make sure writes to DMA buffers are finished before sending
|
||||
* DMA commands to the graphics hardware. */
|
||||
|
@ -1046,9 +1046,8 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
|
|||
first_draw_cmd = NULL;
|
||||
while (i < cmdbuf.size) {
|
||||
drm_savage_cmd_header_t cmd_header;
|
||||
DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
|
||||
sizeof(cmd_header));
|
||||
usr_cmdbuf++;
|
||||
cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr;
|
||||
cmdbuf.cmd_addr++;
|
||||
i++;
|
||||
|
||||
/* Group drawing commands with same state to minimize
|
||||
|
@ -1068,21 +1067,18 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
|
|||
case SAVAGE_CMD_DMA_PRIM:
|
||||
case SAVAGE_CMD_VB_PRIM:
|
||||
if (!first_draw_cmd)
|
||||
first_draw_cmd = usr_cmdbuf - 1;
|
||||
usr_cmdbuf += j;
|
||||
first_draw_cmd = cmdbuf.cmd_addr - 1;
|
||||
cmdbuf.cmd_addr += j;
|
||||
i += j;
|
||||
break;
|
||||
default:
|
||||
if (first_draw_cmd) {
|
||||
ret =
|
||||
savage_dispatch_draw(dev_priv,
|
||||
first_draw_cmd,
|
||||
usr_cmdbuf - 1, dmabuf,
|
||||
usr_vtxbuf,
|
||||
cmdbuf.vb_size,
|
||||
cmdbuf.vb_stride,
|
||||
cmdbuf.nbox,
|
||||
usr_boxes);
|
||||
ret = savage_dispatch_draw(
|
||||
dev_priv, first_draw_cmd,
|
||||
cmdbuf.cmd_addr - 1,
|
||||
dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size,
|
||||
cmdbuf.vb_stride,
|
||||
cmdbuf.nbox, cmdbuf.box_addr);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
first_draw_cmd = NULL;
|
||||
|
@ -1098,12 +1094,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
|
|||
DRM_ERROR("command SAVAGE_CMD_STATE extends "
|
||||
"beyond end of command buffer\n");
|
||||
DMA_FLUSH();
|
||||
return DRM_ERR(EINVAL);
|
||||
ret = DRM_ERR(EINVAL);
|
||||
goto done;
|
||||
}
|
||||
ret = savage_dispatch_state(dev_priv, &cmd_header,
|
||||
(uint32_t __user *)
|
||||
usr_cmdbuf);
|
||||
usr_cmdbuf += j;
|
||||
(const uint32_t *)cmdbuf.cmd_addr);
|
||||
cmdbuf.cmd_addr += j;
|
||||
i += j;
|
||||
break;
|
||||
case SAVAGE_CMD_CLEAR:
|
||||
|
@ -1111,39 +1107,40 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
|
|||
DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
|
||||
"beyond end of command buffer\n");
|
||||
DMA_FLUSH();
|
||||
return DRM_ERR(EINVAL);
|
||||
ret = DRM_ERR(EINVAL);
|
||||
goto done;
|
||||
}
|
||||
ret = savage_dispatch_clear(dev_priv, &cmd_header,
|
||||
usr_cmdbuf,
|
||||
cmdbuf.nbox, usr_boxes);
|
||||
usr_cmdbuf++;
|
||||
cmdbuf.cmd_addr,
|
||||
cmdbuf.nbox, cmdbuf.box_addr);
|
||||
cmdbuf.cmd_addr++;
|
||||
i++;
|
||||
break;
|
||||
case SAVAGE_CMD_SWAP:
|
||||
ret = savage_dispatch_swap(dev_priv,
|
||||
cmdbuf.nbox, usr_boxes);
|
||||
ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox,
|
||||
cmdbuf.box_addr);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
|
||||
DMA_FLUSH();
|
||||
return DRM_ERR(EINVAL);
|
||||
ret = DRM_ERR(EINVAL);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (ret != 0) {
|
||||
DMA_FLUSH();
|
||||
return ret;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
if (first_draw_cmd) {
|
||||
ret =
|
||||
savage_dispatch_draw(dev_priv, first_draw_cmd, usr_cmdbuf,
|
||||
dmabuf, usr_vtxbuf, cmdbuf.vb_size,
|
||||
cmdbuf.vb_stride, cmdbuf.nbox,
|
||||
usr_boxes);
|
||||
ret = savage_dispatch_draw (
|
||||
dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf,
|
||||
cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride,
|
||||
cmdbuf.nbox, cmdbuf.box_addr);
|
||||
if (ret != 0) {
|
||||
DMA_FLUSH();
|
||||
return ret;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1157,5 +1154,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
|
|||
savage_freelist_put(dev, dmabuf);
|
||||
}
|
||||
|
||||
return 0;
|
||||
done:
|
||||
/* If we didn't need to allocate them, these'll be NULL */
|
||||
drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
|
||||
drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
|
||||
drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),
|
||||
DRM_MEM_DRIVER);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1,3 +1,28 @@
|
|||
/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
|
||||
/*
|
||||
* Copyright 2005 Eric Anholt
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __SIS_DRM_H__
|
||||
#define __SIS_DRM_H__
|
||||
|
|
|
@ -32,31 +32,6 @@
|
|||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
sisdrv_PCI_IDS
|
||||
};
|
||||
|
@ -68,8 +43,6 @@ static struct drm_driver driver = {
|
|||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.ioctls = sis_ioctls,
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
|
@ -79,11 +52,18 @@ static struct drm_driver driver = {
|
|||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
},
|
||||
},
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init sis_init(void)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* sis_drv.h -- Private header for sis driver -*- linux-c -*-
|
||||
*
|
||||
/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
|
||||
/*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All rights reserved.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*-
|
||||
/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*-
|
||||
* Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
|
||||
*
|
||||
*/
|
||||
/*
|
||||
* Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
|
||||
* All rights reserved.
|
||||
*
|
||||
|
@ -35,7 +36,7 @@
|
|||
|
||||
#define SET_SIZE 5000
|
||||
|
||||
typedef unsigned int ITEM_TYPE;
|
||||
typedef unsigned long ITEM_TYPE;
|
||||
|
||||
typedef struct {
|
||||
ITEM_TYPE val;
|
||||
|
|
|
@ -86,7 +86,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
|
|||
{
|
||||
drm_sis_mem_t fb;
|
||||
struct sis_memreq req;
|
||||
drm_sis_mem_t __user *argp = (void __user *)data;
|
||||
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
|
||||
int retval = 0;
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
|
||||
|
@ -110,7 +110,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
|
|||
|
||||
DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
|
||||
|
||||
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, req.offset);
|
||||
DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
@ -127,9 +127,9 @@ static int sis_fb_free(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
|
||||
retval = DRM_ERR(EINVAL);
|
||||
sis_free((u32) fb.free);
|
||||
sis_free(fb.free);
|
||||
|
||||
DRM_DEBUG("free fb, offset = %lu\n", fb.free);
|
||||
DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
|
|||
{
|
||||
DRM_DEVICE;
|
||||
drm_sis_private_t *dev_priv = dev->dev_private;
|
||||
drm_sis_mem_t __user *argp = (void __user *)data;
|
||||
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
|
||||
drm_sis_mem_t fb;
|
||||
PMemBlock block;
|
||||
int retval = 0;
|
||||
|
@ -267,7 +267,7 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
|
|||
{
|
||||
DRM_DEVICE;
|
||||
drm_sis_private_t *dev_priv = dev->dev_private;
|
||||
drm_sis_mem_t __user *argp = (void __user *)data;
|
||||
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
|
||||
drm_sis_mem_t agp;
|
||||
PMemBlock block;
|
||||
int retval = 0;
|
||||
|
@ -367,7 +367,7 @@ int sis_final_context(struct drm_device *dev, int context)
|
|||
|
||||
if (i < MAX_CONTEXT) {
|
||||
set_t *set;
|
||||
unsigned int item;
|
||||
ITEM_TYPE item;
|
||||
int retval;
|
||||
|
||||
DRM_DEBUG("find socket %d, context = %d\n", i, context);
|
||||
|
@ -376,7 +376,7 @@ int sis_final_context(struct drm_device *dev, int context)
|
|||
set = global_ppriv[i].sets[0];
|
||||
retval = setFirst(set, &item);
|
||||
while (retval) {
|
||||
DRM_DEBUG("free video memory 0x%x\n", item);
|
||||
DRM_DEBUG("free video memory 0x%lx\n", item);
|
||||
#if defined(__linux__) && defined(CONFIG_FB_SIS)
|
||||
sis_free(item);
|
||||
#else
|
||||
|
@ -390,7 +390,7 @@ int sis_final_context(struct drm_device *dev, int context)
|
|||
set = global_ppriv[i].sets[1];
|
||||
retval = setFirst(set, &item);
|
||||
while (retval) {
|
||||
DRM_DEBUG("free agp memory 0x%x\n", item);
|
||||
DRM_DEBUG("free agp memory 0x%lx\n", item);
|
||||
mmFreeMem((PMemBlock) item);
|
||||
retval = setNext(set, &item);
|
||||
}
|
||||
|
@ -403,12 +403,12 @@ int sis_final_context(struct drm_device *dev, int context)
|
|||
}
|
||||
|
||||
drm_ioctl_desc_t sis_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, 1, 1},
|
||||
[DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, 1, 1}
|
||||
[DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
|
||||
[DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}
|
||||
};
|
||||
|
||||
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
|
||||
|
|
|
@ -36,31 +36,6 @@
|
|||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
tdfx_PCI_IDS
|
||||
};
|
||||
|
@ -70,8 +45,6 @@ static struct drm_driver driver = {
|
|||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
|
@ -80,11 +53,18 @@ static struct drm_driver driver = {
|
|||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
},
|
||||
},
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init tdfx_init(void)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
|
||||
* Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
|
||||
*
|
||||
*/
|
||||
/*
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
|
@ -30,10 +31,6 @@
|
|||
#ifndef __TDFX_H__
|
||||
#define __TDFX_H__
|
||||
|
||||
/* This remains constant for all DRM template files.
|
||||
*/
|
||||
#define DRM(x) tdfx_##x
|
||||
|
||||
/* General customization:
|
||||
*/
|
||||
|
||||
|
|
|
@ -213,7 +213,9 @@ static int via_initialize(drm_device_t * dev,
|
|||
dev_priv->dma_wrap = init->size;
|
||||
dev_priv->dma_offset = init->offset;
|
||||
dev_priv->last_pause_ptr = NULL;
|
||||
dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr;
|
||||
dev_priv->hw_addr_ptr =
|
||||
(volatile uint32_t *)((char *)dev_priv->mmio->handle +
|
||||
init->reg_pause_addr);
|
||||
|
||||
via_cmdbuf_start(dev_priv);
|
||||
|
||||
|
@ -232,13 +234,13 @@ int via_dma_init(DRM_IOCTL_ARGS)
|
|||
|
||||
switch (init.func) {
|
||||
case VIA_INIT_DMA:
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
if (!DRM_SUSER(DRM_CURPROC))
|
||||
retcode = DRM_ERR(EPERM);
|
||||
else
|
||||
retcode = via_initialize(dev, dev_priv, &init);
|
||||
break;
|
||||
case VIA_CLEANUP_DMA:
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
if (!DRM_SUSER(DRM_CURPROC))
|
||||
retcode = DRM_ERR(EPERM);
|
||||
else
|
||||
retcode = via_dma_cleanup(dev);
|
||||
|
@ -349,9 +351,6 @@ int via_cmdbuffer(DRM_IOCTL_ARGS)
|
|||
return 0;
|
||||
}
|
||||
|
||||
extern int
|
||||
via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
|
||||
unsigned int size);
|
||||
static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
|
||||
drm_via_cmdbuffer_t * cmd)
|
||||
{
|
||||
|
@ -450,9 +449,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
|
|||
if ((count <= 8) && (count >= 0)) {
|
||||
uint32_t rgtr, ptr;
|
||||
rgtr = *(dev_priv->hw_addr_ptr);
|
||||
ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
|
||||
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 -
|
||||
CMDBUF_ALIGNMENT_SIZE;
|
||||
ptr = ((volatile char *)dev_priv->last_pause_ptr -
|
||||
dev_priv->dma_ptr) + dev_priv->dma_offset +
|
||||
(uint32_t) dev_priv->agpAddr + 4 - CMDBUF_ALIGNMENT_SIZE;
|
||||
if (rgtr <= ptr) {
|
||||
DRM_ERROR
|
||||
("Command regulator\npaused at count %d, address %x, "
|
||||
|
@ -472,7 +471,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
|
|||
&& count--) ;
|
||||
|
||||
rgtr = *(dev_priv->hw_addr_ptr);
|
||||
ptr = ((char *)paused_at - dev_priv->dma_ptr) +
|
||||
ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
|
||||
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
|
||||
|
||||
ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ?
|
||||
|
@ -724,3 +723,22 @@ int via_cmdbuf_size(DRM_IOCTL_ARGS)
|
|||
sizeof(d_siz));
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_ioctl_desc_t via_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER},
|
||||
[DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER},
|
||||
[DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER},
|
||||
[DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH},
|
||||
[DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH}
|
||||
};
|
||||
|
||||
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
|
||||
|
|
|
@ -0,0 +1,805 @@
|
|||
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
|
||||
*
|
||||
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sub license,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Thomas Hellstrom.
|
||||
* Partially based on code obtained from Digeo Inc.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Unmaps the DMA mappings.
|
||||
* FIXME: Is this a NoOp on x86? Also
|
||||
* FIXME: What happens if this one is called and a pending blit has previously done
|
||||
* the same DMA mappings?
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "via_drm.h"
|
||||
#include "via_drv.h"
|
||||
#include "via_dmablit.h"
|
||||
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
|
||||
#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
|
||||
#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
|
||||
|
||||
typedef struct _drm_via_descriptor {
|
||||
uint32_t mem_addr;
|
||||
uint32_t dev_addr;
|
||||
uint32_t size;
|
||||
uint32_t next;
|
||||
} drm_via_descriptor_t;
|
||||
|
||||
|
||||
/*
|
||||
* Unmap a DMA mapping.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
static void
|
||||
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
||||
{
|
||||
int num_desc = vsg->num_desc;
|
||||
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
|
||||
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
|
||||
drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
descriptor_this_page;
|
||||
dma_addr_t next = vsg->chain_start;
|
||||
|
||||
while(num_desc--) {
|
||||
if (descriptor_this_page-- == 0) {
|
||||
cur_descriptor_page--;
|
||||
descriptor_this_page = vsg->descriptors_per_page - 1;
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
descriptor_this_page;
|
||||
}
|
||||
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
|
||||
dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
|
||||
next = (dma_addr_t) desc_ptr->next;
|
||||
desc_ptr--;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* If mode = 0, count how many descriptors are needed.
|
||||
* If mode = 1, Map the DMA pages for the device, put together and map also the descriptors.
|
||||
* Descriptors are run in reverse order by the hardware because we are not allowed to update the
|
||||
* 'next' field without syncing calls when the descriptor is already mapped.
|
||||
*/
|
||||
|
||||
static void
|
||||
via_map_blit_for_device(struct pci_dev *pdev,
|
||||
const drm_via_dmablit_t *xfer,
|
||||
drm_via_sg_info_t *vsg,
|
||||
int mode)
|
||||
{
|
||||
unsigned cur_descriptor_page = 0;
|
||||
unsigned num_descriptors_this_page = 0;
|
||||
unsigned char *mem_addr = xfer->mem_addr;
|
||||
unsigned char *cur_mem;
|
||||
unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
|
||||
uint32_t fb_addr = xfer->fb_addr;
|
||||
uint32_t cur_fb;
|
||||
unsigned long line_len;
|
||||
unsigned remaining_len;
|
||||
int num_desc = 0;
|
||||
int cur_line;
|
||||
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
|
||||
drm_via_descriptor_t *desc_ptr = 0;
|
||||
|
||||
if (mode == 1)
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page];
|
||||
|
||||
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
|
||||
|
||||
line_len = xfer->line_length;
|
||||
cur_fb = fb_addr;
|
||||
cur_mem = mem_addr;
|
||||
|
||||
while (line_len > 0) {
|
||||
|
||||
remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
|
||||
line_len -= remaining_len;
|
||||
|
||||
if (mode == 1) {
|
||||
desc_ptr->mem_addr =
|
||||
dma_map_page(&pdev->dev,
|
||||
vsg->pages[VIA_PFN(cur_mem) -
|
||||
VIA_PFN(first_addr)],
|
||||
VIA_PGOFF(cur_mem), remaining_len,
|
||||
vsg->direction);
|
||||
desc_ptr->dev_addr = cur_fb;
|
||||
|
||||
desc_ptr->size = remaining_len;
|
||||
desc_ptr->next = (uint32_t) next;
|
||||
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
|
||||
DMA_TO_DEVICE);
|
||||
desc_ptr++;
|
||||
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
|
||||
num_descriptors_this_page = 0;
|
||||
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
|
||||
}
|
||||
}
|
||||
|
||||
num_desc++;
|
||||
cur_mem += remaining_len;
|
||||
cur_fb += remaining_len;
|
||||
}
|
||||
|
||||
mem_addr += xfer->mem_stride;
|
||||
fb_addr += xfer->fb_stride;
|
||||
}
|
||||
|
||||
if (mode == 1) {
|
||||
vsg->chain_start = next;
|
||||
vsg->state = dr_via_device_mapped;
|
||||
}
|
||||
vsg->num_desc = num_desc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Function that frees up all resources for a blit. It is usable even if the
|
||||
* blit info has only be partially built as long as the status enum is consistent
|
||||
* with the actual status of the used resources.
|
||||
*/
|
||||
|
||||
|
||||
/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only be partially built as long as the status enum is
 * consistent with the actual status of the used resources.
 *
 * The switch deliberately falls through from the most-advanced state down
 * to the least-advanced one, so each state releases its own resource plus
 * everything acquired before it.
 */
void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch(vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i=0; i<vsg->num_desc_pages; ++i) {
			/* Allocation may have failed part-way; skip NULL slots. */
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i=0; i<vsg->num_pages; ++i) {
			if ( NULL != (page = vsg->pages[i])) {
				/* Blits from the device may have dirtied the page. */
				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	if (vsg->bounce_buffer) {
		vfree(vsg->bounce_buffer);
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}
|
||||
|
||||
/*
|
||||
* Fire a blit engine.
|
||||
*/
|
||||
|
||||
static void
|
||||
via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
|
||||
{
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
|
||||
|
||||
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
|
||||
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
|
||||
VIA_DMA_CSR_DE);
|
||||
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
|
||||
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
|
||||
VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
|
||||
}
|
||||
|
||||
/*
|
||||
* Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
|
||||
* occur here if the calling user does not have access to the submitted address.
|
||||
*/
|
||||
|
||||
static int
|
||||
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
|
||||
{
|
||||
int ret;
|
||||
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
|
||||
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
|
||||
first_pfn + 1;
|
||||
|
||||
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
|
||||
return DRM_ERR(ENOMEM);
|
||||
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
|
||||
vsg->num_pages, vsg->direction, 0, vsg->pages, NULL);
|
||||
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
if (ret != vsg->num_pages) {
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
vsg->state = dr_via_pages_locked;
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
vsg->state = dr_via_pages_locked;
|
||||
DRM_DEBUG("DMA pages locked\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
|
||||
* pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
|
||||
* quite large for some blits, and pages don't need to be contingous.
|
||||
*/
|
||||
|
||||
static int
|
||||
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
|
||||
{
|
||||
int i;
|
||||
|
||||
vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
|
||||
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
|
||||
vsg->descriptors_per_page;
|
||||
|
||||
if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
|
||||
return DRM_ERR(ENOMEM);
|
||||
|
||||
memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
|
||||
vsg->state = dr_via_desc_pages_alloc;
|
||||
for (i=0; i<vsg->num_desc_pages; ++i) {
|
||||
if (NULL == (vsg->desc_pages[i] =
|
||||
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
|
||||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
|
||||
vsg->num_desc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
via_abort_dmablit(drm_device_t *dev, int engine)
|
||||
{
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
|
||||
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
|
||||
}
|
||||
|
||||
static void
|
||||
via_dmablit_engine_off(drm_device_t *dev, int engine)
|
||||
{
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
|
||||
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
|
||||
* The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
|
||||
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
|
||||
* the workqueue task takes care of processing associated with the old blit.
|
||||
*/
|
||||
|
||||
/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 *
 * May be called from hard IRQ context (from_irq != 0, lock already suits IRQ
 * context) or from process/timer context (from_irq == 0, IRQ-safe locking).
 */
void
via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave=0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	/* Take the queue lock appropriately for the calling context. */
	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	/* A transfer is finished if the active blit signalled TD, or if an
	 * aborted engine has dropped its DMA-enable bit. */
	done_transfer = blitq->is_active &&
	  (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		/* Retire the finished slot and wake anyone syncing on it. */
		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		/* Heavy cleanup (unmap/free) is deferred to the workqueue. */
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	/* Engine idle: either start the next queued blit or shut it down. */
	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			/* Poll-timer backs up hardware that delivers no IRQ. */
			if (!timer_pending(&blitq->poll_timer)) {
				blitq->poll_timer.expires = jiffies+1;
				add_timer(&blitq->poll_timer);
			}
		} else {
			if (timer_pending(&blitq->poll_timer)) {
				del_timer(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Check whether this blit is still active, performing necessary locking.
|
||||
*/
|
||||
|
||||
/*
 * Check whether this blit is still active, performing necessary locking.
 *
 * 'handle' is compared against the done/cur handle counters using unsigned
 * modular arithmetic with a half-range threshold of 2^23, so the test stays
 * correct across 32-bit handle wraparound.  If the blit is active and
 * 'queue' is non-NULL, *queue is set to the wait queue of the slot the
 * blit occupies, so the caller can sleep on its completion.
 */
static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	/* Active iff: not yet done (done - handle wrapped negative) AND
	 * actually issued (handle is not ahead of cur). */
	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		/* Map the handle back to its ring slot, adjusting for wrap. */
		slot = handle - blitq->done_blit_handle + blitq->cur -1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
|
||||
|
||||
/*
|
||||
* Sync. Wait for at least three seconds for the blit to be performed.
|
||||
*/
|
||||
|
||||
/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 *
 * Returns 0 if the blit identified by 'handle' has completed (or was never
 * active), otherwise the error set by DRM_WAIT_ON (e.g. interrupted by a
 * signal or timed out).  The wait condition is re-checked under the queue
 * lock by via_dmablit_active().
 */
static int
via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	/* Only sleep if the blit is still in flight; 'queue' is the slot's
	 * wait queue, woken by the IRQ handler when the slot retires. */
	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}
|
||||
|
||||
|
||||
/*
|
||||
* A timer that regularly polls the blit engine in cases where we don't have interrupts:
|
||||
* a) Broken hardware (typically those that don't have any video capture facility).
|
||||
* b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
|
||||
* The timer and hardware IRQ's can and do work in parallel. If the hardware has
|
||||
* irqs, it will shorten the latency somewhat.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQ's can and do work in parallel. If the hardware has
 * irqs, it will shorten the latency somewhat.
 */
static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	drm_device_t *dev = blitq->dev;
	/* Recover the engine index from the queue's position in the array. */
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	/* Re-arm ourselves; the handler deletes the timer when the engine idles. */
	if (!timer_pending(&blitq->poll_timer)) {
		blitq->poll_timer.expires = jiffies+1;
		add_timer(&blitq->poll_timer);
	}
	/* NOTE(review): the handler runs a second time unconditionally here.
	 * Presumably this lets it delete the timer just re-armed above when
	 * the engine has gone idle, shortening abort latency — confirm that
	 * intent; otherwise the second call looks redundant. */
	via_dmablit_handler(dev, engine, 0);

}
|
||||
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Workqueue task that frees data and mappings associated with a blit.
|
||||
* Also wakes up waiting processes. Each of these tasks handles one
|
||||
* blit engine only and may not be called on each interrupt.
|
||||
*/
|
||||
|
||||
|
||||
/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 *
 * The blit lock is dropped around the heavyweight teardown
 * (via_free_sg_info/kfree) because those may sleep; the serviced/cur
 * indices are re-read under the lock each iteration, so blits retired
 * meanwhile are also processed.
 */
static void
via_dmablit_workqueue(void *data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	drm_device_t *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/* Drain every slot the IRQ handler has retired since we last ran. */
	while(blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		/* Drop the lock: freeing the sg info may sleep. */
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		/* A slot just became free; wake blit submitters waiting for one. */
		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
|
||||
|
||||
|
||||
/*
|
||||
* Init all blit engines. Currently we use two, but some hardware have 4.
|
||||
*/
|
||||
|
||||
|
||||
void
|
||||
via_init_dmablit(drm_device_t *dev)
|
||||
{
|
||||
int i,j;
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
|
||||
drm_via_blitq_t *blitq;
|
||||
|
||||
pci_set_master(dev->pdev);
|
||||
|
||||
for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
|
||||
blitq = dev_priv->blit_queues + i;
|
||||
blitq->dev = dev;
|
||||
blitq->cur_blit_handle = 0;
|
||||
blitq->done_blit_handle = 0;
|
||||
blitq->head = 0;
|
||||
blitq->cur = 0;
|
||||
blitq->serviced = 0;
|
||||
blitq->num_free = VIA_NUM_BLIT_SLOTS;
|
||||
blitq->num_outstanding = 0;
|
||||
blitq->is_active = 0;
|
||||
blitq->aborting = 0;
|
||||
blitq->blit_lock = SPIN_LOCK_UNLOCKED;
|
||||
for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
|
||||
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
|
||||
}
|
||||
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
|
||||
INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
|
||||
init_timer(&blitq->poll_timer);
|
||||
blitq->poll_timer.function = &via_dmablit_timer;
|
||||
blitq->poll_timer.data = (unsigned long) blitq;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Build all info and do all mappings required for a blit.
|
||||
*/
|
||||
|
||||
|
||||
static int
|
||||
via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
|
||||
{
|
||||
int draw = xfer->to_fb;
|
||||
int ret = 0;
|
||||
|
||||
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
|
||||
vsg->bounce_buffer = 0;
|
||||
|
||||
vsg->state = dr_via_sg_init;
|
||||
|
||||
if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
|
||||
DRM_ERROR("Zero size bitblt.\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
/*
|
||||
* Below check is a driver limitation, not a hardware one. We
|
||||
* don't want to lock unused pages, and don't want to incoporate the
|
||||
* extra logic of avoiding them. Make sure there are no.
|
||||
* (Not a big limitation anyway.)
|
||||
*/
|
||||
|
||||
if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
|
||||
(xfer->mem_stride > 2048*4)) {
|
||||
DRM_ERROR("Too large system memory stride. Stride: %d, "
|
||||
"Length: %d\n", xfer->mem_stride, xfer->line_length);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
if (xfer->num_lines > 2048) {
|
||||
DRM_ERROR("Too many PCI DMA bitblt lines.\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
/*
|
||||
* we allow a negative fb stride to allow flipping of images in
|
||||
* transfer.
|
||||
*/
|
||||
|
||||
if (xfer->mem_stride < xfer->line_length ||
|
||||
abs(xfer->fb_stride) < xfer->line_length) {
|
||||
DRM_ERROR("Invalid frame-buffer / memory stride.\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
/*
|
||||
* A hardware bug seems to be worked around if system memory addresses start on
|
||||
* 16 byte boundaries. This seems a bit restrictive however. VIA is contacted
|
||||
* about this. Meanwhile, impose the following restrictions:
|
||||
*/
|
||||
|
||||
#ifdef VIA_BUGFREE
|
||||
if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
|
||||
((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
|
||||
DRM_ERROR("Invalid DRM bitblt alignment.\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
#else
|
||||
if ((((unsigned long)xfer->mem_addr & 15) ||
|
||||
((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) ||
|
||||
(xfer->fb_stride & 3)) {
|
||||
DRM_ERROR("Invalid DRM bitblt alignment.\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
|
||||
DRM_ERROR("Could not lock DMA pages.\n");
|
||||
via_free_sg_info(dev->pdev, vsg);
|
||||
return ret;
|
||||
}
|
||||
|
||||
via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
|
||||
if (0 != (ret = via_alloc_desc_pages(vsg))) {
|
||||
DRM_ERROR("Could not allocate DMA descriptor pages.\n");
|
||||
via_free_sg_info(dev->pdev, vsg);
|
||||
return ret;
|
||||
}
|
||||
via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Reserve one free slot in the blit queue. Will wait for one second for one
|
||||
* to become available. Otherwise -EBUSY is returned.
|
||||
*/
|
||||
|
||||
/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 *
 * The lock is dropped while sleeping in DRM_WAIT_ON and re-taken before
 * num_free is re-checked, so a slot snatched by another submitter in
 * between simply sends us around the loop again.  EINTR from the wait is
 * translated to EAGAIN so the ioctl can be restarted by userspace.
 */
static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret=0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while(blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret) {
			return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
		}

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	/* Claim the slot while still holding the lock. */
	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}
|
||||
|
||||
/*
|
||||
* Hand back a free slot if we changed our mind.
|
||||
*/
|
||||
|
||||
static void
|
||||
via_dmablit_release_slot(drm_via_blitq_t *blitq)
|
||||
{
|
||||
unsigned long irqsave;
|
||||
|
||||
spin_lock_irqsave(&blitq->blit_lock, irqsave);
|
||||
blitq->num_free++;
|
||||
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
|
||||
DRM_WAKEUP( &blitq->busy_queue );
|
||||
}
|
||||
|
||||
/*
|
||||
* Grab a free slot. Build blit info and queue a blit.
|
||||
*/
|
||||
|
||||
|
||||
/*
 * Grab a free slot. Build blit info and queue a blit.
 *
 * On success xfer->sync is filled in with the handle and engine number the
 * caller can later pass to the sync ioctl.  The vsg allocated here is owned
 * by the queue from the moment it is stored in blitq->blits[]; the
 * workqueue frees it after the blit retires.
 */
static int
via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return DRM_ERR(EINVAL);
	}

	/* Engine 0 handles blits to the frame buffer, engine 1 blits from it. */
	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
		return ret;
	}
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return DRM_ERR(ENOMEM);
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		/* Undo the reservation; via_build_sg_info cleaned up after itself. */
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/* Publish the blit in the ring and hand out its sync handle. */
	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	/* Kick the engine in case it is idle so the blit starts immediately. */
	via_dmablit_handler(dev, engine, 0);

	return 0;
}
|
||||
|
||||
/*
|
||||
* Sync on a previously submitted blit. Note that the X server use signals extensively, and
|
||||
* that there is a very big proability that this IOCTL will be interrupted by a signal. In that
|
||||
* case it returns with -EAGAIN for the signal to be delivered.
|
||||
* The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
|
||||
*/
|
||||
|
||||
int
|
||||
via_dma_blit_sync( DRM_IOCTL_ARGS )
|
||||
{
|
||||
drm_via_blitsync_t sync;
|
||||
int err;
|
||||
DRM_DEVICE;
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
|
||||
|
||||
if (sync.engine >= VIA_NUM_BLIT_ENGINES)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);
|
||||
|
||||
if (DRM_ERR(EINTR) == err)
|
||||
err = DRM_ERR(EAGAIN);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
|
||||
* while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
|
||||
* be reissued. See the above IOCTL code.
|
||||
*/
|
||||
|
||||
int
|
||||
via_dma_blit( DRM_IOCTL_ARGS )
|
||||
{
|
||||
drm_via_dmablit_t xfer;
|
||||
int err;
|
||||
DRM_DEVICE;
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
|
||||
|
||||
err = via_dmablit(dev, &xfer);
|
||||
|
||||
DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));
|
||||
|
||||
return err;
|
||||
}
|
|
@ -0,0 +1,140 @@
|
|||
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
|
||||
*
|
||||
* Copyright 2005 Thomas Hellstrom.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sub license,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Thomas Hellstrom.
|
||||
* Register info from Digeo Inc.
|
||||
*/
|
||||
|
||||
#ifndef _VIA_DMABLIT_H
|
||||
#define _VIA_DMABLIT_H
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#define VIA_NUM_BLIT_ENGINES 2
|
||||
#define VIA_NUM_BLIT_SLOTS 8
|
||||
|
||||
struct _drm_via_descriptor;
|
||||
|
||||
/*
 * Per-blit scatter/gather bookkeeping.  The 'state' enum records how far
 * resource acquisition has progressed so via_free_sg_info() can tear down
 * exactly what was built (its switch falls through from the most-advanced
 * state downward).
 */
typedef struct _drm_via_sg_info {
	struct page **pages;		/* pinned user pages backing the transfer */
	unsigned long num_pages;	/* length of the pages[] array */
	struct _drm_via_descriptor **desc_pages;	/* one whole page of descriptors per entry */
	int num_desc_pages;
	int num_desc;			/* total descriptors in the chain */
	enum dma_data_direction direction;	/* DMA_TO_DEVICE for blits to the fb */
	unsigned char *bounce_buffer;
	dma_addr_t chain_start;		/* bus address of the first descriptor the HW fetches */
	uint32_t free_on_sequence;
	unsigned int descriptors_per_page;
	int aborted;			/* set when the blit was aborted rather than completed */
	enum {
		dr_via_device_mapped,	/* descriptor chain mapped for the device */
		dr_via_desc_pages_alloc,	/* descriptor pages allocated */
		dr_via_pages_locked,	/* user pages pinned */
		dr_via_pages_alloc,	/* pages[] array allocated */
		dr_via_sg_init		/* nothing acquired yet */
	} state;
} drm_via_sg_info_t;
|
||||
|
||||
/*
 * Per-engine blit queue: a small ring of pending blits plus the
 * synchronization machinery shared by the submitting ioctl, the IRQ
 * handler, the polling timer and the cleanup workqueue.  All ring indices
 * and counters are protected by blit_lock.
 */
typedef struct _drm_via_blitq {
	drm_device_t *dev;
	uint32_t cur_blit_handle;	/* handle of the most recently queued blit */
	uint32_t done_blit_handle;	/* handle of the most recently retired blit */
	unsigned serviced;		/* next ring slot the workqueue will clean up */
	unsigned head;			/* next ring slot to queue into */
	unsigned cur;			/* ring slot of the currently active blit */
	unsigned num_free;		/* free ring slots (grab/release_slot) */
	unsigned num_outstanding;	/* queued but not yet fired */
	unsigned long end;		/* jiffies deadline before the active blit is aborted */
	int aborting;			/* abort requested for the active blit */
	int is_active;			/* engine currently running a blit */
	drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
	spinlock_t blit_lock;
	wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];	/* per-slot sync waiters */
	wait_queue_head_t busy_queue;	/* submitters waiting for a free slot */
	struct work_struct wq;		/* deferred unmap/free of retired blits */
	struct timer_list poll_timer;	/* IRQ-less polling / abort detection */
} drm_via_blitq_t;
|
||||
|
||||
|
||||
/*
|
||||
* PCI DMA Registers
|
||||
* Channels 2 & 3 don't seem to be implemented in hardware.
|
||||
*/
|
||||
|
||||
#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
|
||||
#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
|
||||
#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
|
||||
#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
|
||||
|
||||
#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
|
||||
#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
|
||||
#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
|
||||
#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
|
||||
|
||||
#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
|
||||
#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
|
||||
#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
|
||||
#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
|
||||
|
||||
#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
|
||||
#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
|
||||
#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
|
||||
#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
|
||||
|
||||
#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
|
||||
#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
|
||||
#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
|
||||
#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
|
||||
|
||||
#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
|
||||
#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
|
||||
#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
|
||||
#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
|
||||
|
||||
#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
|
||||
|
||||
/* Define for DMA engine */
/* DPR (descriptor pointer register / descriptor flags) */
#define VIA_DMA_DPR_EC		(1<<1)	/* end of chain */
#define VIA_DMA_DPR_DDIE	(1<<2)	/* descriptor done interrupt enable */
#define VIA_DMA_DPR_DT		(1<<3)	/* direction of transfer (RO) */

/* MR (mode register) */
#define VIA_DMA_MR_CM		(1<<0)	/* chaining mode */
#define VIA_DMA_MR_TDIE		(1<<1)	/* transfer done interrupt enable */
#define VIA_DMA_MR_HENDMACMD	(1<<7)	/* ? */

/* CSR (command/status register) */
#define VIA_DMA_CSR_DE		(1<<0)	/* DMA enable */
#define VIA_DMA_CSR_TS		(1<<1)	/* transfer start */
#define VIA_DMA_CSR_TA		(1<<2)	/* transfer abort */
#define VIA_DMA_CSR_TD		(1<<3)	/* transfer done */
#define VIA_DMA_CSR_DD		(1<<4)	/* descriptor done */
/* Fix: removed a second, duplicate definition of VIA_DMA_DPR_EC that
 * previously followed the CSR bits (same value, defined above). */
|
||||
|
||||
|
||||
|
||||
#endif
|
|
@ -75,6 +75,8 @@
|
|||
#define DRM_VIA_CMDBUF_SIZE 0x0b
|
||||
#define NOT_USED
|
||||
#define DRM_VIA_WAIT_IRQ 0x0d
|
||||
#define DRM_VIA_DMA_BLIT 0x0e
|
||||
#define DRM_VIA_BLIT_SYNC 0x0f
|
||||
|
||||
#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
|
||||
#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
|
||||
|
@ -89,6 +91,8 @@
|
|||
#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
|
||||
drm_via_cmdbuf_size_t)
|
||||
#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
|
||||
#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
|
||||
#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
|
||||
|
||||
/* Indices into buf.Setup where various bits of state are mirrored per
|
||||
* context and per buffer. These can be fired at the card as a unit,
|
||||
|
@ -103,8 +107,12 @@
|
|||
#define VIA_BACK 0x2
|
||||
#define VIA_DEPTH 0x4
|
||||
#define VIA_STENCIL 0x8
|
||||
#define VIDEO 0
|
||||
#define AGP 1
|
||||
#define VIA_MEM_VIDEO 0 /* matches drm constant */
|
||||
#define VIA_MEM_AGP 1 /* matches drm constant */
|
||||
#define VIA_MEM_SYSTEM 2
|
||||
#define VIA_MEM_MIXED 3
|
||||
#define VIA_MEM_UNKNOWN 4
|
||||
|
||||
typedef struct {
|
||||
uint32_t offset;
|
||||
uint32_t size;
|
||||
|
@ -192,6 +200,9 @@ typedef struct _drm_via_sarea {
|
|||
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
|
||||
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
|
||||
|
||||
/* Used by the 3d driver only at this point, for pageflipping:
|
||||
*/
|
||||
unsigned int pfCurrentOffset;
|
||||
} drm_via_sarea_t;
|
||||
|
||||
typedef struct _drm_via_cmdbuf_size {
|
||||
|
@ -212,6 +223,16 @@ typedef enum {
|
|||
|
||||
#define VIA_IRQ_FLAGS_MASK 0xF0000000
|
||||
|
||||
enum drm_via_irqs {
|
||||
drm_via_irq_hqv0 = 0,
|
||||
drm_via_irq_hqv1,
|
||||
drm_via_irq_dma0_dd,
|
||||
drm_via_irq_dma0_td,
|
||||
drm_via_irq_dma1_dd,
|
||||
drm_via_irq_dma1_td,
|
||||
drm_via_irq_num
|
||||
};
|
||||
|
||||
struct drm_via_wait_irq_request {
|
||||
unsigned irq;
|
||||
via_irq_seq_type_t type;
|
||||
|
@ -224,20 +245,25 @@ typedef union drm_via_irqwait {
|
|||
struct drm_wait_vblank_reply reply;
|
||||
} drm_via_irqwait_t;
|
||||
|
||||
#ifdef __KERNEL__
|
||||
typedef struct drm_via_blitsync {
|
||||
uint32_t sync_handle;
|
||||
unsigned engine;
|
||||
} drm_via_blitsync_t;
|
||||
|
||||
int via_fb_init(DRM_IOCTL_ARGS);
|
||||
int via_mem_alloc(DRM_IOCTL_ARGS);
|
||||
int via_mem_free(DRM_IOCTL_ARGS);
|
||||
int via_agp_init(DRM_IOCTL_ARGS);
|
||||
int via_map_init(DRM_IOCTL_ARGS);
|
||||
int via_decoder_futex(DRM_IOCTL_ARGS);
|
||||
int via_dma_init(DRM_IOCTL_ARGS);
|
||||
int via_cmdbuffer(DRM_IOCTL_ARGS);
|
||||
int via_flush_ioctl(DRM_IOCTL_ARGS);
|
||||
int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
|
||||
int via_cmdbuf_size(DRM_IOCTL_ARGS);
|
||||
int via_wait_irq(DRM_IOCTL_ARGS);
|
||||
typedef struct drm_via_dmablit {
|
||||
uint32_t num_lines;
|
||||
uint32_t line_length;
|
||||
|
||||
uint32_t fb_addr;
|
||||
uint32_t fb_stride;
|
||||
|
||||
unsigned char *mem_addr;
|
||||
uint32_t mem_stride;
|
||||
|
||||
int bounce_buffer;
|
||||
int to_fb;
|
||||
|
||||
drm_via_blitsync_t sync;
|
||||
} drm_via_dmablit_t;
|
||||
|
||||
#endif
|
||||
#endif /* _VIA_DRM_H_ */
|
||||
|
|
|
@ -29,54 +29,21 @@
|
|||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
static int postinit(struct drm_device *dev, unsigned long flags)
|
||||
static int dri_library_name(struct drm_device *dev, char *buf)
|
||||
{
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int version(drm_version_t * version)
|
||||
{
|
||||
int len;
|
||||
|
||||
version->version_major = DRIVER_MAJOR;
|
||||
version->version_minor = DRIVER_MINOR;
|
||||
version->version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
DRM_COPY(version->name, DRIVER_NAME);
|
||||
DRM_COPY(version->date, DRIVER_DATE);
|
||||
DRM_COPY(version->desc, DRIVER_DESC);
|
||||
return 0;
|
||||
return snprintf(buf, PAGE_SIZE, "unichrome");
|
||||
}
|
||||
|
||||
static struct pci_device_id pciidlist[] = {
|
||||
viadrv_PCI_IDS
|
||||
};
|
||||
|
||||
static drm_ioctl_desc_t ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0}
|
||||
};
|
||||
|
||||
static struct drm_driver driver = {
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
|
||||
DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.load = via_driver_load,
|
||||
.unload = via_driver_unload,
|
||||
.context_ctor = via_init_context,
|
||||
.context_dtor = via_final_context,
|
||||
.vblank_wait = via_driver_vblank_wait,
|
||||
|
@ -85,13 +52,11 @@ static struct drm_driver driver = {
|
|||
.irq_uninstall = via_driver_irq_uninstall,
|
||||
.irq_handler = via_driver_irq_handler,
|
||||
.dma_quiescent = via_driver_dma_quiescent,
|
||||
.dri_library_name = dri_library_name,
|
||||
.reclaim_buffers = drm_core_reclaim_buffers,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.postinit = postinit,
|
||||
.version = version,
|
||||
.ioctls = ioctls,
|
||||
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
|
||||
.ioctls = via_ioctls,
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
|
@ -100,15 +65,23 @@ static struct drm_driver driver = {
|
|||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
},
|
||||
},
|
||||
.pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
}
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int __init via_init(void)
|
||||
{
|
||||
driver.num_ioctls = via_max_ioctl;
|
||||
via_init_command_verifier();
|
||||
return drm_init(&driver);
|
||||
}
|
||||
|
|
|
@ -24,24 +24,26 @@
|
|||
#ifndef _VIA_DRV_H_
|
||||
#define _VIA_DRV_H_
|
||||
|
||||
#define DRIVER_AUTHOR "VIA"
|
||||
#define DRIVER_AUTHOR "Various"
|
||||
|
||||
#define DRIVER_NAME "via"
|
||||
#define DRIVER_DESC "VIA Unichrome / Pro"
|
||||
#define DRIVER_DATE "20050523"
|
||||
#define DRIVER_DATE "20051116"
|
||||
|
||||
#define DRIVER_MAJOR 2
|
||||
#define DRIVER_MINOR 6
|
||||
#define DRIVER_PATCHLEVEL 3
|
||||
#define DRIVER_MINOR 7
|
||||
#define DRIVER_PATCHLEVEL 4
|
||||
|
||||
#include "via_verifier.h"
|
||||
|
||||
#include "via_dmablit.h"
|
||||
|
||||
#define VIA_PCI_BUF_SIZE 60000
|
||||
#define VIA_FIRE_BUF_SIZE 1024
|
||||
#define VIA_NUM_IRQS 2
|
||||
#define VIA_NUM_IRQS 4
|
||||
|
||||
typedef struct drm_via_ring_buffer {
|
||||
drm_map_t map;
|
||||
drm_local_map_t map;
|
||||
char *virtual_start;
|
||||
} drm_via_ring_buffer_t;
|
||||
|
||||
|
@ -56,9 +58,9 @@ typedef struct drm_via_irq {
|
|||
|
||||
typedef struct drm_via_private {
|
||||
drm_via_sarea_t *sarea_priv;
|
||||
drm_map_t *sarea;
|
||||
drm_map_t *fb;
|
||||
drm_map_t *mmio;
|
||||
drm_local_map_t *sarea;
|
||||
drm_local_map_t *fb;
|
||||
drm_local_map_t *mmio;
|
||||
unsigned long agpAddr;
|
||||
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
|
||||
char *dma_ptr;
|
||||
|
@ -82,8 +84,15 @@ typedef struct drm_via_private {
|
|||
maskarray_t *irq_masks;
|
||||
uint32_t irq_enable_mask;
|
||||
uint32_t irq_pending_mask;
|
||||
int *irq_map;
|
||||
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
|
||||
} drm_via_private_t;
|
||||
|
||||
enum via_family {
|
||||
VIA_OTHER = 0,
|
||||
VIA_PRO_GROUP_A,
|
||||
};
|
||||
|
||||
/* VIA MMIO register access */
|
||||
#define VIA_BASE ((dev_priv->mmio))
|
||||
|
||||
|
@ -92,12 +101,31 @@ typedef struct drm_via_private {
|
|||
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
|
||||
#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
|
||||
|
||||
extern drm_ioctl_desc_t via_ioctls[];
|
||||
extern int via_max_ioctl;
|
||||
|
||||
extern int via_fb_init(DRM_IOCTL_ARGS);
|
||||
extern int via_mem_alloc(DRM_IOCTL_ARGS);
|
||||
extern int via_mem_free(DRM_IOCTL_ARGS);
|
||||
extern int via_agp_init(DRM_IOCTL_ARGS);
|
||||
extern int via_map_init(DRM_IOCTL_ARGS);
|
||||
extern int via_decoder_futex(DRM_IOCTL_ARGS);
|
||||
extern int via_dma_init(DRM_IOCTL_ARGS);
|
||||
extern int via_cmdbuffer(DRM_IOCTL_ARGS);
|
||||
extern int via_flush_ioctl(DRM_IOCTL_ARGS);
|
||||
extern int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
|
||||
extern int via_cmdbuf_size(DRM_IOCTL_ARGS);
|
||||
extern int via_wait_irq(DRM_IOCTL_ARGS);
|
||||
extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
|
||||
extern int via_dma_blit( DRM_IOCTL_ARGS );
|
||||
|
||||
extern int via_driver_load(drm_device_t *dev, unsigned long chipset);
|
||||
extern int via_driver_unload(drm_device_t *dev);
|
||||
|
||||
extern int via_init_context(drm_device_t * dev, int context);
|
||||
extern int via_final_context(drm_device_t * dev, int context);
|
||||
|
||||
extern int via_do_cleanup_map(drm_device_t * dev);
|
||||
extern int via_map_init(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
|
||||
|
||||
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
|
||||
|
@ -111,8 +139,10 @@ extern int via_driver_dma_quiescent(drm_device_t * dev);
|
|||
extern void via_init_futex(drm_via_private_t * dev_priv);
|
||||
extern void via_cleanup_futex(drm_via_private_t * dev_priv);
|
||||
extern void via_release_futex(drm_via_private_t * dev_priv, int context);
|
||||
extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq,
|
||||
int force_sequence, unsigned int *sequence);
|
||||
|
||||
extern int via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
|
||||
unsigned int size);
|
||||
extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
|
||||
extern void via_init_dmablit(drm_device_t *dev);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -22,14 +22,7 @@
|
|||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/pci.h>
|
||||
#include <asm/io.h>
|
||||
#include "drmP.h"
|
||||
|
||||
#include "via_ds.h"
|
||||
extern unsigned int VIA_DEBUG;
|
||||
|
|
|
@ -50,6 +50,15 @@
|
|||
#define VIA_IRQ_HQV1_ENABLE (1 << 25)
|
||||
#define VIA_IRQ_HQV0_PENDING (1 << 9)
|
||||
#define VIA_IRQ_HQV1_PENDING (1 << 10)
|
||||
#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
|
||||
#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
|
||||
#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
|
||||
#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
|
||||
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
|
||||
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
|
||||
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
|
||||
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
|
||||
|
||||
|
||||
/*
|
||||
* Device-specific IRQs go here. This type might need to be extended with
|
||||
|
@ -61,13 +70,24 @@ static maskarray_t via_pro_group_a_irqs[] = {
|
|||
{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
|
||||
0x00000000},
|
||||
{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
|
||||
0x00000000}
|
||||
0x00000000},
|
||||
{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
|
||||
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
|
||||
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
|
||||
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
|
||||
};
|
||||
static int via_num_pro_group_a =
|
||||
sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
|
||||
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
|
||||
|
||||
static maskarray_t via_unichrome_irqs[] = { };
|
||||
static maskarray_t via_unichrome_irqs[] = {
|
||||
{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
|
||||
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
|
||||
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
|
||||
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
|
||||
};
|
||||
static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
|
||||
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
|
||||
|
||||
static unsigned time_diff(struct timeval *now, struct timeval *then)
|
||||
{
|
||||
|
@ -113,6 +133,11 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
|
|||
atomic_inc(&cur_irq->irq_received);
|
||||
DRM_WAKEUP(&cur_irq->irq_queue);
|
||||
handled = 1;
|
||||
if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
|
||||
via_dmablit_handler(dev, 0, 1);
|
||||
} else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
|
||||
via_dmablit_handler(dev, 1, 1);
|
||||
}
|
||||
}
|
||||
cur_irq++;
|
||||
}
|
||||
|
@ -165,7 +190,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
int
|
||||
via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
|
||||
unsigned int *sequence)
|
||||
{
|
||||
|
@ -174,6 +199,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
|
|||
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
|
||||
int ret = 0;
|
||||
maskarray_t *masks = dev_priv->irq_masks;
|
||||
int real_irq;
|
||||
|
||||
DRM_DEBUG("%s\n", __FUNCTION__);
|
||||
|
||||
|
@ -182,15 +208,23 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
if (irq >= dev_priv->num_irqs) {
|
||||
if (irq >= drm_via_irq_num) {
|
||||
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
|
||||
irq);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
cur_irq += irq;
|
||||
real_irq = dev_priv->irq_map[irq];
|
||||
|
||||
if (masks[irq][2] && !force_sequence) {
|
||||
if (real_irq < 0) {
|
||||
DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
|
||||
__FUNCTION__, irq);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
cur_irq += real_irq;
|
||||
|
||||
if (masks[real_irq][2] && !force_sequence) {
|
||||
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
|
||||
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
|
||||
masks[irq][4]));
|
||||
|
@ -226,6 +260,8 @@ void via_driver_irq_preinstall(drm_device_t * dev)
|
|||
via_pro_group_a_irqs : via_unichrome_irqs;
|
||||
dev_priv->num_irqs = (dev_priv->pro_group_a) ?
|
||||
via_num_pro_group_a : via_num_unichrome;
|
||||
dev_priv->irq_map = (dev_priv->pro_group_a) ?
|
||||
via_irqmap_pro_group_a : via_irqmap_unichrome;
|
||||
|
||||
for (i = 0; i < dev_priv->num_irqs; ++i) {
|
||||
atomic_set(&cur_irq->irq_received, 0);
|
||||
|
@ -241,7 +277,7 @@ void via_driver_irq_preinstall(drm_device_t * dev)
|
|||
|
||||
dev_priv->last_vblank_valid = 0;
|
||||
|
||||
// Clear VSync interrupt regs
|
||||
/* Clear VSync interrupt regs */
|
||||
status = VIA_READ(VIA_REG_INTERRUPT);
|
||||
VIA_WRITE(VIA_REG_INTERRUPT, status &
|
||||
~(dev_priv->irq_enable_mask));
|
||||
|
@ -291,8 +327,7 @@ void via_driver_irq_uninstall(drm_device_t * dev)
|
|||
|
||||
int via_wait_irq(DRM_IOCTL_ARGS)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->head->dev;
|
||||
DRM_DEVICE;
|
||||
drm_via_irqwait_t __user *argp = (void __user *)data;
|
||||
drm_via_irqwait_t irqwait;
|
||||
struct timeval now;
|
||||
|
|
|
@ -27,16 +27,10 @@
|
|||
|
||||
static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
|
||||
{
|
||||
drm_via_private_t *dev_priv;
|
||||
drm_via_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
DRM_DEBUG("%s\n", __FUNCTION__);
|
||||
|
||||
dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(dev_priv, 0, sizeof(drm_via_private_t));
|
||||
|
||||
DRM_GETSAREA();
|
||||
if (!dev_priv->sarea) {
|
||||
DRM_ERROR("could not find sarea!\n");
|
||||
|
@ -67,7 +61,8 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
|
|||
dev_priv->agpAddr = init->agpAddr;
|
||||
|
||||
via_init_futex(dev_priv);
|
||||
dev_priv->pro_group_a = (dev->pdev->device == 0x3118);
|
||||
|
||||
via_init_dmablit(dev);
|
||||
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
return 0;
|
||||
|
@ -75,15 +70,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
|
|||
|
||||
int via_do_cleanup_map(drm_device_t * dev)
|
||||
{
|
||||
if (dev->dev_private) {
|
||||
|
||||
drm_via_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
via_dma_cleanup(dev);
|
||||
|
||||
drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
|
||||
dev->dev_private = NULL;
|
||||
}
|
||||
via_dma_cleanup(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -107,3 +94,29 @@ int via_map_init(DRM_IOCTL_ARGS)
|
|||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int via_driver_load(drm_device_t *dev, unsigned long chipset)
|
||||
{
|
||||
drm_via_private_t *dev_priv;
|
||||
|
||||
dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
||||
if (chipset == VIA_PRO_GROUP_A)
|
||||
dev_priv->pro_group_a = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int via_driver_unload(drm_device_t *dev)
|
||||
{
|
||||
drm_via_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -42,7 +42,7 @@ static int via_agp_free(drm_via_mem_t * mem);
|
|||
static int via_fb_alloc(drm_via_mem_t * mem);
|
||||
static int via_fb_free(drm_via_mem_t * mem);
|
||||
|
||||
static int add_alloc_set(int context, int type, unsigned int val)
|
||||
static int add_alloc_set(int context, int type, unsigned long val)
|
||||
{
|
||||
int i, retval = 0;
|
||||
|
||||
|
@ -56,7 +56,7 @@ static int add_alloc_set(int context, int type, unsigned int val)
|
|||
return retval;
|
||||
}
|
||||
|
||||
static int del_alloc_set(int context, int type, unsigned int val)
|
||||
static int del_alloc_set(int context, int type, unsigned long val)
|
||||
{
|
||||
int i, retval = 0;
|
||||
|
||||
|
@ -199,13 +199,13 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
|
|||
sizeof(mem));
|
||||
|
||||
switch (mem.type) {
|
||||
case VIDEO:
|
||||
case VIA_MEM_VIDEO:
|
||||
if (via_fb_alloc(&mem) < 0)
|
||||
return -EFAULT;
|
||||
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
|
||||
sizeof(mem));
|
||||
return 0;
|
||||
case AGP:
|
||||
case VIA_MEM_AGP:
|
||||
if (via_agp_alloc(&mem) < 0)
|
||||
return -EFAULT;
|
||||
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
|
||||
|
@ -232,7 +232,7 @@ static int via_fb_alloc(drm_via_mem_t * mem)
|
|||
if (block) {
|
||||
fb.offset = block->ofs;
|
||||
fb.free = (unsigned long)block;
|
||||
if (!add_alloc_set(fb.context, VIDEO, fb.free)) {
|
||||
if (!add_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
|
||||
DRM_DEBUG("adding to allocation set fails\n");
|
||||
via_mmFreeMem((PMemBlock) fb.free);
|
||||
retval = -1;
|
||||
|
@ -269,7 +269,7 @@ static int via_agp_alloc(drm_via_mem_t * mem)
|
|||
if (block) {
|
||||
agp.offset = block->ofs;
|
||||
agp.free = (unsigned long)block;
|
||||
if (!add_alloc_set(agp.context, AGP, agp.free)) {
|
||||
if (!add_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
|
||||
DRM_DEBUG("adding to allocation set fails\n");
|
||||
via_mmFreeMem((PMemBlock) agp.free);
|
||||
retval = -1;
|
||||
|
@ -297,11 +297,11 @@ int via_mem_free(DRM_IOCTL_ARGS)
|
|||
|
||||
switch (mem.type) {
|
||||
|
||||
case VIDEO:
|
||||
case VIA_MEM_VIDEO:
|
||||
if (via_fb_free(&mem) == 0)
|
||||
return 0;
|
||||
break;
|
||||
case AGP:
|
||||
case VIA_MEM_AGP:
|
||||
if (via_agp_free(&mem) == 0)
|
||||
return 0;
|
||||
break;
|
||||
|
@ -329,7 +329,7 @@ static int via_fb_free(drm_via_mem_t * mem)
|
|||
|
||||
via_mmFreeMem((PMemBlock) fb.free);
|
||||
|
||||
if (!del_alloc_set(fb.context, VIDEO, fb.free)) {
|
||||
if (!del_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
|
||||
retval = -1;
|
||||
}
|
||||
|
||||
|
@ -352,7 +352,7 @@ static int via_agp_free(drm_via_mem_t * mem)
|
|||
|
||||
via_mmFreeMem((PMemBlock) agp.free);
|
||||
|
||||
if (!del_alloc_set(agp.context, AGP, agp.free)) {
|
||||
if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
|
||||
retval = -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -237,7 +237,7 @@ static hazard_t table3[256];
|
|||
static __inline__ int
|
||||
eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
|
||||
{
|
||||
if ((*buf - buf_end) >= num_words) {
|
||||
if ((buf_end - *buf) >= num_words) {
|
||||
*buf += num_words;
|
||||
return 0;
|
||||
}
|
||||
|
@ -249,14 +249,14 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
|
|||
* Partially stolen from drm_memory.h
|
||||
*/
|
||||
|
||||
static __inline__ drm_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
|
||||
static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
|
||||
unsigned long offset,
|
||||
unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
struct list_head *list;
|
||||
drm_map_list_t *r_list;
|
||||
drm_map_t *map = seq->map_cache;
|
||||
drm_local_map_t *map = seq->map_cache;
|
||||
|
||||
if (map && map->offset <= offset
|
||||
&& (offset + size) <= (map->offset + map->size)) {
|
||||
|
|
|
@ -47,7 +47,7 @@ typedef struct {
|
|||
int agp_texture;
|
||||
int multitex;
|
||||
drm_device_t *dev;
|
||||
drm_map_t *map_cache;
|
||||
drm_local_map_t *map_cache;
|
||||
uint32_t vertex_count;
|
||||
int agp;
|
||||
const uint32_t *buf_start;
|
||||
|
@ -55,5 +55,7 @@ typedef struct {
|
|||
|
||||
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
|
||||
drm_device_t * dev, int agp);
|
||||
extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf,
|
||||
unsigned int size);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -50,8 +50,11 @@ void via_release_futex(drm_via_private_t * dev_priv, int context)
|
|||
unsigned int i;
|
||||
volatile int *lock;
|
||||
|
||||
if (!dev_priv->sarea_priv)
|
||||
return;
|
||||
|
||||
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
|
||||
lock = (int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
|
||||
lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
|
||||
if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
|
||||
if (_DRM_LOCK_IS_HELD(*lock)
|
||||
&& (*lock & _DRM_LOCK_CONT)) {
|
||||
|
@ -79,7 +82,7 @@ int via_decoder_futex(DRM_IOCTL_ARGS)
|
|||
if (fx.lock > VIA_NR_XVMC_LOCKS)
|
||||
return -EFAULT;
|
||||
|
||||
lock = (int *)XVMCLOCKPTR(sAPriv, fx.lock);
|
||||
lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock);
|
||||
|
||||
switch (fx.func) {
|
||||
case VIA_FUTEX_WAIT:
|
||||
|
|
Loading…
Reference in New Issue