drm: run cleanfile across drm tree
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent 8562b3f25d
commit bc5f4523f7
@@ -38,7 +38,7 @@ config DRM_RADEON
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
run the Radeon in plain VGA mode.

If M is selected, the module will be called radeon.

config DRM_I810
@@ -71,9 +71,9 @@ config DRM_I915
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
module will be called i915. AGP support is required for this driver
to work. This driver is used by the Intel driver in X.org 6.8 and
XFree86 4.4 and above. If unsure, build this and i830 as modules and
the X server will load the correct one.

endchoice

config DRM_MGA
@@ -88,7 +88,7 @@ config DRM_SIS
tristate "SiS video cards"
depends on DRM && AGP
help
Choose this option if you have a SiS 630 or compatible video
chipset. If M is selected the module will be called sis. AGP
support is required for this driver to work.
@@ -105,4 +105,3 @@ config DRM_SAVAGE
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
@@ -38,5 +38,3 @@ obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_SIS) += sis.o
obj-$(CONFIG_DRM_SAVAGE)+= savage.o
obj-$(CONFIG_DRM_VIA) +=via.o
@@ -41,4 +41,3 @@ For specific information about kernel-level support, see:

A Security Analysis of the Direct Rendering Infrastructure
http://dri.sourceforge.net/doc/security_low_level.html
@ -184,7 +184,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
|
|||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
list = drm_find_matching_map(dev, map);
|
||||
|
@ -814,9 +814,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
|
|||
page_count = 0;
|
||||
|
||||
while (entry->buf_count < count) {
|
||||
|
||||
|
||||
dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
|
||||
|
||||
|
||||
if (!dmah) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
|
@ -1592,5 +1592,3 @@ int drm_order(unsigned long size)
|
|||
return order;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_order);
|
||||
|
||||
|
||||
|
|
|
@ -159,7 +159,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
|
|||
request->handle = NULL;
|
||||
list_for_each_entry(_entry, &dev->maplist, head) {
|
||||
if (_entry->map == map) {
|
||||
request->handle =
|
||||
request->handle =
|
||||
(void *)(unsigned long)_entry->user_token;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
|
|||
}
|
||||
}
|
||||
|
||||
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
|
||||
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
|
||||
unsigned long key)
|
||||
{
|
||||
struct drm_hash_item *entry;
|
||||
|
@ -129,7 +129,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
|
|||
}
|
||||
|
||||
/*
|
||||
* Just insert an item and return any "bits" bit key that hasn't been
|
||||
* Just insert an item and return any "bits" bit key that hasn't been
|
||||
* used before.
|
||||
*/
|
||||
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
|
||||
|
@ -200,4 +200,3 @@ void drm_ht_remove(struct drm_open_hash *ht)
|
|||
ht->table = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);
|
|||
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -234,7 +234,7 @@ int drm_getclient(struct drm_device *dev, void *data,
|
|||
|
||||
idx = client->idx;
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
|
||||
if (list_empty(&dev->filelist)) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return -EINVAL;
|
||||
|
|
|
@ -179,4 +179,3 @@ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
|
|||
iounmap(map->handle);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_core_ioremapfree);
|
||||
|
||||
|
|
|
@ -293,4 +293,3 @@ void drm_mm_takedown(struct drm_mm * mm)
|
|||
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
|
||||
}
|
||||
|
||||
|
|
|
@ -69,9 +69,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
|
|||
#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
|
||||
copy_to_user(arg1, arg2, arg3)
|
||||
/* Macros for copyfrom user, but checking readability only once */
|
||||
#define DRM_VERIFYAREA_READ( uaddr, size ) \
|
||||
#define DRM_VERIFYAREA_READ( uaddr, size ) \
|
||||
(access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
|
||||
#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
|
||||
#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
|
||||
__copy_from_user(arg1, arg2, arg3)
|
||||
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
|
||||
__copy_to_user(arg1, arg2, arg3)
|
||||
|
|
|
@ -312,4 +312,3 @@
|
|||
{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
|
||||
{0, 0, 0}
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@
|
|||
#endif
|
||||
|
||||
/** Maximum number of drawables in the SAREA */
|
||||
#define SAREA_MAX_DRAWABLES 256
|
||||
#define SAREA_MAX_DRAWABLES 256
|
||||
|
||||
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
|
||||
|
||||
|
|
|
@ -224,7 +224,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
|
|||
}
|
||||
if ((ret = drm_get_head(dev, &dev->primary)))
|
||||
goto err_g2;
|
||||
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
|
||||
driver->name, driver->major, driver->minor, driver->patchlevel,
|
||||
driver->date, dev->primary.minor);
|
||||
|
|
|
@ -40,7 +40,7 @@
|
|||
|
||||
#define I810_BUF_FREE 2
|
||||
#define I810_BUF_CLIENT 1
|
||||
#define I810_BUF_HARDWARE 0
|
||||
#define I810_BUF_HARDWARE 0
|
||||
|
||||
#define I810_BUF_UNMAPPED 0
|
||||
#define I810_BUF_MAPPED 1
|
||||
|
@ -848,7 +848,7 @@ static void i810_dma_quiescent(struct drm_device * dev)
|
|||
drm_i810_private_t *dev_priv = dev->dev_private;
|
||||
RING_LOCALS;
|
||||
|
||||
/* printk("%s\n", __FUNCTION__); */
|
||||
/* printk("%s\n", __FUNCTION__); */
|
||||
|
||||
i810_kernel_lost_context(dev);
|
||||
|
||||
|
@ -869,7 +869,7 @@ static int i810_flush_queue(struct drm_device * dev)
|
|||
int i, ret = 0;
|
||||
RING_LOCALS;
|
||||
|
||||
/* printk("%s\n", __FUNCTION__); */
|
||||
/* printk("%s\n", __FUNCTION__); */
|
||||
|
||||
i810_kernel_lost_context(dev);
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Jeff Hartmann <jhartmann@valinux.com>
|
||||
* Jeff Hartmann <jhartmann@valinux.com>
|
||||
*
|
||||
*/
|
||||
|
||||
|
@ -134,7 +134,7 @@ extern int i810_max_ioctl;
|
|||
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
|
||||
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
|
||||
#define I810_READ(reg) I810_DEREF(reg)
|
||||
#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
|
||||
#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
|
||||
#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
|
||||
#define I810_READ16(reg) I810_DEREF16(reg)
|
||||
#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
|
||||
|
@ -155,19 +155,19 @@ extern int i810_max_ioctl;
|
|||
} while (0)
|
||||
|
||||
#define ADVANCE_LP_RING() do { \
|
||||
if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
|
||||
dev_priv->ring.tail = outring; \
|
||||
if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
|
||||
dev_priv->ring.tail = outring; \
|
||||
I810_WRITE(LP_RING + RING_TAIL, outring); \
|
||||
} while(0)
|
||||
|
||||
#define OUT_RING(n) do { \
|
||||
#define OUT_RING(n) do { \
|
||||
if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
|
||||
*(volatile unsigned int *)(virt + outring) = n; \
|
||||
outring += 4; \
|
||||
outring &= ringmask; \
|
||||
} while (0)
|
||||
|
||||
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
|
||||
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
|
||||
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
|
||||
#define CMD_REPORT_HEAD (7<<23)
|
||||
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
|
||||
|
@ -184,28 +184,28 @@ extern int i810_max_ioctl;
|
|||
|
||||
#define I810REG_HWSTAM 0x02098
|
||||
#define I810REG_INT_IDENTITY_R 0x020a4
|
||||
#define I810REG_INT_MASK_R 0x020a8
|
||||
#define I810REG_INT_MASK_R 0x020a8
|
||||
#define I810REG_INT_ENABLE_R 0x020a0
|
||||
|
||||
#define LP_RING 0x2030
|
||||
#define HP_RING 0x2040
|
||||
#define RING_TAIL 0x00
|
||||
#define LP_RING 0x2030
|
||||
#define HP_RING 0x2040
|
||||
#define RING_TAIL 0x00
|
||||
#define TAIL_ADDR 0x000FFFF8
|
||||
#define RING_HEAD 0x04
|
||||
#define HEAD_WRAP_COUNT 0xFFE00000
|
||||
#define HEAD_WRAP_ONE 0x00200000
|
||||
#define HEAD_ADDR 0x001FFFFC
|
||||
#define RING_START 0x08
|
||||
#define START_ADDR 0x00FFFFF8
|
||||
#define RING_LEN 0x0C
|
||||
#define RING_NR_PAGES 0x000FF000
|
||||
#define RING_REPORT_MASK 0x00000006
|
||||
#define RING_REPORT_64K 0x00000002
|
||||
#define RING_REPORT_128K 0x00000004
|
||||
#define RING_NO_REPORT 0x00000000
|
||||
#define RING_VALID_MASK 0x00000001
|
||||
#define RING_VALID 0x00000001
|
||||
#define RING_INVALID 0x00000000
|
||||
#define RING_HEAD 0x04
|
||||
#define HEAD_WRAP_COUNT 0xFFE00000
|
||||
#define HEAD_WRAP_ONE 0x00200000
|
||||
#define HEAD_ADDR 0x001FFFFC
|
||||
#define RING_START 0x08
|
||||
#define START_ADDR 0x00FFFFF8
|
||||
#define RING_LEN 0x0C
|
||||
#define RING_NR_PAGES 0x000FF000
|
||||
#define RING_REPORT_MASK 0x00000006
|
||||
#define RING_REPORT_64K 0x00000002
|
||||
#define RING_REPORT_128K 0x00000004
|
||||
#define RING_NO_REPORT 0x00000000
|
||||
#define RING_VALID_MASK 0x00000001
|
||||
#define RING_VALID 0x00000001
|
||||
#define RING_INVALID 0x00000000
|
||||
|
||||
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
|
||||
#define SC_UPDATE_SCISSOR (0x1<<1)
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
|
||||
#define I830_BUF_FREE 2
|
||||
#define I830_BUF_CLIENT 1
|
||||
#define I830_BUF_HARDWARE 0
|
||||
#define I830_BUF_HARDWARE 0
|
||||
|
||||
#define I830_BUF_UNMAPPED 0
|
||||
#define I830_BUF_MAPPED 1
|
||||
|
|
|
@ -12,9 +12,9 @@
|
|||
#define _I830_DEFINES_
|
||||
|
||||
#define I830_DMA_BUF_ORDER 12
|
||||
#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
|
||||
#define I830_DMA_BUF_NR 256
|
||||
#define I830_NR_SAREA_CLIPRECTS 8
|
||||
#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
|
||||
#define I830_DMA_BUF_NR 256
|
||||
#define I830_NR_SAREA_CLIPRECTS 8
|
||||
|
||||
/* Each region is a minimum of 64k, and there are at most 64 of them.
|
||||
*/
|
||||
|
@ -58,7 +58,7 @@
|
|||
#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
|
||||
#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
|
||||
#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
|
||||
#define I830_UPLOAD_STIPPLE 0x8000000
|
||||
#define I830_UPLOAD_STIPPLE 0x8000000
|
||||
|
||||
/* Indices into buf.Setup where various bits of state are mirrored per
|
||||
* context and per buffer. These can be fired at the card as a unit,
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Jeff Hartmann <jhartmann@valinux.com>
|
||||
* Jeff Hartmann <jhartmann@valinux.com>
|
||||
*
|
||||
*/
|
||||
|
||||
|
@ -183,7 +183,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
|
|||
|
||||
extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
|
||||
|
||||
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
|
||||
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
|
||||
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
|
||||
#define CMD_REPORT_HEAD (7<<23)
|
||||
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
|
||||
|
@ -203,30 +203,30 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
|
||||
#define I830REG_HWSTAM 0x02098
|
||||
#define I830REG_INT_IDENTITY_R 0x020a4
|
||||
#define I830REG_INT_MASK_R 0x020a8
|
||||
#define I830REG_INT_MASK_R 0x020a8
|
||||
#define I830REG_INT_ENABLE_R 0x020a0
|
||||
|
||||
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
|
||||
|
||||
#define LP_RING 0x2030
|
||||
#define HP_RING 0x2040
|
||||
#define RING_TAIL 0x00
|
||||
#define LP_RING 0x2030
|
||||
#define HP_RING 0x2040
|
||||
#define RING_TAIL 0x00
|
||||
#define TAIL_ADDR 0x001FFFF8
|
||||
#define RING_HEAD 0x04
|
||||
#define HEAD_WRAP_COUNT 0xFFE00000
|
||||
#define HEAD_WRAP_ONE 0x00200000
|
||||
#define HEAD_ADDR 0x001FFFFC
|
||||
#define RING_START 0x08
|
||||
#define START_ADDR 0x0xFFFFF000
|
||||
#define RING_LEN 0x0C
|
||||
#define RING_NR_PAGES 0x001FF000
|
||||
#define RING_REPORT_MASK 0x00000006
|
||||
#define RING_REPORT_64K 0x00000002
|
||||
#define RING_REPORT_128K 0x00000004
|
||||
#define RING_NO_REPORT 0x00000000
|
||||
#define RING_VALID_MASK 0x00000001
|
||||
#define RING_VALID 0x00000001
|
||||
#define RING_INVALID 0x00000000
|
||||
#define RING_HEAD 0x04
|
||||
#define HEAD_WRAP_COUNT 0xFFE00000
|
||||
#define HEAD_WRAP_ONE 0x00200000
|
||||
#define HEAD_ADDR 0x001FFFFC
|
||||
#define RING_START 0x08
|
||||
#define START_ADDR 0x0xFFFFF000
|
||||
#define RING_LEN 0x0C
|
||||
#define RING_NR_PAGES 0x001FF000
|
||||
#define RING_REPORT_MASK 0x00000006
|
||||
#define RING_REPORT_64K 0x00000002
|
||||
#define RING_REPORT_128K 0x00000004
|
||||
#define RING_NO_REPORT 0x00000000
|
||||
#define RING_VALID_MASK 0x00000001
|
||||
#define RING_VALID 0x00000001
|
||||
#define RING_INVALID 0x00000000
|
||||
|
||||
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
|
||||
#define SC_UPDATE_SCISSOR (0x1<<1)
|
||||
|
@ -279,9 +279,9 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
|
||||
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
|
||||
|
||||
#define MI_BATCH_BUFFER ((0x30<<23)|1)
|
||||
#define MI_BATCH_BUFFER_START (0x31<<23)
|
||||
#define MI_BATCH_BUFFER_END (0xA<<23)
|
||||
#define MI_BATCH_BUFFER ((0x30<<23)|1)
|
||||
#define MI_BATCH_BUFFER_START (0x31<<23)
|
||||
#define MI_BATCH_BUFFER_END (0xA<<23)
|
||||
#define MI_BATCH_NON_SECURE (1)
|
||||
|
||||
#define MI_WAIT_FOR_EVENT ((0x3<<23))
|
||||
|
|
|
@ -144,7 +144,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
|
|||
struct drm_file *file_priv)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
drm_i830_irq_wait_t *irqwait = data;
|
||||
drm_i830_irq_wait_t *irqwait = data;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
|
|
|
@ -351,7 +351,7 @@ static int validate_cmd(int cmd)
|
|||
{
|
||||
int ret = do_validate_cmd(cmd);
|
||||
|
||||
/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
|
||||
/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -77,7 +77,7 @@ static struct drm_driver driver = {
|
|||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
},
|
||||
|
||||
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
|
|
|
@ -163,7 +163,7 @@ extern void i915_mem_release(struct drm_device * dev,
|
|||
|
||||
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
|
||||
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
|
||||
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
|
||||
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
|
||||
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
|
||||
|
||||
#define I915_VERBOSE 0
|
||||
|
@ -200,7 +200,7 @@ extern void i915_mem_release(struct drm_device * dev,
|
|||
|
||||
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
|
||||
|
||||
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
|
||||
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
|
||||
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
|
||||
#define CMD_REPORT_HEAD (7<<23)
|
||||
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
|
||||
|
@ -217,7 +217,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
|
||||
#define I915REG_HWSTAM 0x02098
|
||||
#define I915REG_INT_IDENTITY_R 0x020a4
|
||||
#define I915REG_INT_MASK_R 0x020a8
|
||||
#define I915REG_INT_MASK_R 0x020a8
|
||||
#define I915REG_INT_ENABLE_R 0x020a0
|
||||
|
||||
#define I915REG_PIPEASTAT 0x70024
|
||||
|
@ -229,7 +229,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
#define SRX_INDEX 0x3c4
|
||||
#define SRX_DATA 0x3c5
|
||||
#define SR01 1
|
||||
#define SR01_SCREEN_OFF (1<<5)
|
||||
#define SR01_SCREEN_OFF (1<<5)
|
||||
|
||||
#define PPCR 0x61204
|
||||
#define PPCR_ON (1<<0)
|
||||
|
@ -249,25 +249,25 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
#define ADPA_DPMS_OFF (3<<10)
|
||||
|
||||
#define NOPID 0x2094
|
||||
#define LP_RING 0x2030
|
||||
#define HP_RING 0x2040
|
||||
#define RING_TAIL 0x00
|
||||
#define LP_RING 0x2030
|
||||
#define HP_RING 0x2040
|
||||
#define RING_TAIL 0x00
|
||||
#define TAIL_ADDR 0x001FFFF8
|
||||
#define RING_HEAD 0x04
|
||||
#define HEAD_WRAP_COUNT 0xFFE00000
|
||||
#define HEAD_WRAP_ONE 0x00200000
|
||||
#define HEAD_ADDR 0x001FFFFC
|
||||
#define RING_START 0x08
|
||||
#define START_ADDR 0x0xFFFFF000
|
||||
#define RING_LEN 0x0C
|
||||
#define RING_NR_PAGES 0x001FF000
|
||||
#define RING_REPORT_MASK 0x00000006
|
||||
#define RING_REPORT_64K 0x00000002
|
||||
#define RING_REPORT_128K 0x00000004
|
||||
#define RING_NO_REPORT 0x00000000
|
||||
#define RING_VALID_MASK 0x00000001
|
||||
#define RING_VALID 0x00000001
|
||||
#define RING_INVALID 0x00000000
|
||||
#define RING_HEAD 0x04
|
||||
#define HEAD_WRAP_COUNT 0xFFE00000
|
||||
#define HEAD_WRAP_ONE 0x00200000
|
||||
#define HEAD_ADDR 0x001FFFFC
|
||||
#define RING_START 0x08
|
||||
#define START_ADDR 0x0xFFFFF000
|
||||
#define RING_LEN 0x0C
|
||||
#define RING_NR_PAGES 0x001FF000
|
||||
#define RING_REPORT_MASK 0x00000006
|
||||
#define RING_REPORT_64K 0x00000002
|
||||
#define RING_REPORT_128K 0x00000004
|
||||
#define RING_NO_REPORT 0x00000000
|
||||
#define RING_VALID_MASK 0x00000001
|
||||
#define RING_VALID 0x00000001
|
||||
#define RING_INVALID 0x00000000
|
||||
|
||||
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
|
||||
#define SC_UPDATE_SCISSOR (0x1<<1)
|
||||
|
@ -294,9 +294,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
|
|||
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
|
||||
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
|
||||
|
||||
#define MI_BATCH_BUFFER ((0x30<<23)|1)
|
||||
#define MI_BATCH_BUFFER_START (0x31<<23)
|
||||
#define MI_BATCH_BUFFER_END (0xA<<23)
|
||||
#define MI_BATCH_BUFFER ((0x30<<23)|1)
|
||||
#define MI_BATCH_BUFFER_START (0x31<<23)
|
||||
#define MI_BATCH_BUFFER_END (0xA<<23)
|
||||
#define MI_BATCH_NON_SECURE (1)
|
||||
#define MI_BATCH_NON_SECURE_I965 (1<<8)
|
||||
|
||||
|
|
|
@ -291,7 +291,7 @@ static int i915_emit_irq(struct drm_device * dev)
|
|||
OUT_RING(0);
|
||||
OUT_RING(GFX_OP_USER_INTERRUPT);
|
||||
ADVANCE_LP_RING();
|
||||
|
||||
|
||||
return dev_priv->counter;
|
||||
}
|
||||
|
||||
|
@ -336,7 +336,7 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ
|
|||
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
|
||||
(((cur_vblank = atomic_read(counter))
|
||||
- *sequence) <= (1<<23)));
|
||||
|
||||
|
||||
*sequence = cur_vblank;
|
||||
|
||||
return ret;
|
||||
|
@ -423,7 +423,7 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
|
||||
DRM_ERROR("%s called with invalid pipe 0x%x\n",
|
||||
DRM_ERROR("%s called with invalid pipe 0x%x\n",
|
||||
__FUNCTION__, pipe->pipe);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -375,7 +375,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
|
|||
DRM_ERROR("get_heap failed");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
|
||||
if (!*heap) {
|
||||
DRM_ERROR("heap not initialized?");
|
||||
return -EFAULT;
|
||||
|
@ -384,4 +384,3 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
|
|||
i915_mem_takedown( heap );
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -493,7 +493,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
|
|||
dma_bs->agp_size);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
dev_priv->agp_size = agp_size;
|
||||
dev_priv->agp_handle = agp_req.handle;
|
||||
|
||||
|
@ -550,7 +550,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
|
|||
{
|
||||
struct drm_map_list *_entry;
|
||||
unsigned long agp_token = 0;
|
||||
|
||||
|
||||
list_for_each_entry(_entry, &dev->maplist, head) {
|
||||
if (_entry->map == dev->agp_buffer_map)
|
||||
agp_token = _entry->user_token;
|
||||
|
@ -964,7 +964,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
|
|||
|
||||
free_req.handle = dev_priv->agp_handle;
|
||||
drm_agp_free(dev, &free_req);
|
||||
|
||||
|
||||
dev_priv->agp_textures = NULL;
|
||||
dev_priv->agp_size = 0;
|
||||
dev_priv->agp_handle = 0;
|
||||
|
|
|
@ -216,8 +216,8 @@ static inline u32 _MGA_READ(u32 * addr)
|
|||
#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
|
||||
#endif
|
||||
|
||||
#define DWGREG0 0x1c00
|
||||
#define DWGREG0_END 0x1dff
|
||||
#define DWGREG0 0x1c00
|
||||
#define DWGREG0_END 0x1dff
|
||||
#define DWGREG1 0x2c00
|
||||
#define DWGREG1_END 0x2dff
|
||||
|
||||
|
@ -394,22 +394,22 @@ do { \
|
|||
#define MGA_VINTCLR (1 << 4)
|
||||
#define MGA_VINTEN (1 << 5)
|
||||
|
||||
#define MGA_ALPHACTRL 0x2c7c
|
||||
#define MGA_AR0 0x1c60
|
||||
#define MGA_AR1 0x1c64
|
||||
#define MGA_AR2 0x1c68
|
||||
#define MGA_AR3 0x1c6c
|
||||
#define MGA_AR4 0x1c70
|
||||
#define MGA_AR5 0x1c74
|
||||
#define MGA_AR6 0x1c78
|
||||
#define MGA_ALPHACTRL 0x2c7c
|
||||
#define MGA_AR0 0x1c60
|
||||
#define MGA_AR1 0x1c64
|
||||
#define MGA_AR2 0x1c68
|
||||
#define MGA_AR3 0x1c6c
|
||||
#define MGA_AR4 0x1c70
|
||||
#define MGA_AR5 0x1c74
|
||||
#define MGA_AR6 0x1c78
|
||||
|
||||
#define MGA_CXBNDRY 0x1c80
|
||||
#define MGA_CXLEFT 0x1ca0
|
||||
#define MGA_CXLEFT 0x1ca0
|
||||
#define MGA_CXRIGHT 0x1ca4
|
||||
|
||||
#define MGA_DMAPAD 0x1c54
|
||||
#define MGA_DSTORG 0x2cb8
|
||||
#define MGA_DWGCTL 0x1c00
|
||||
#define MGA_DMAPAD 0x1c54
|
||||
#define MGA_DSTORG 0x2cb8
|
||||
#define MGA_DWGCTL 0x1c00
|
||||
# define MGA_OPCOD_MASK (15 << 0)
|
||||
# define MGA_OPCOD_TRAP (4 << 0)
|
||||
# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
|
||||
|
@ -455,27 +455,27 @@ do { \
|
|||
# define MGA_CLIPDIS (1 << 31)
|
||||
#define MGA_DWGSYNC 0x2c4c
|
||||
|
||||
#define MGA_FCOL 0x1c24
|
||||
#define MGA_FIFOSTATUS 0x1e10
|
||||
#define MGA_FOGCOL 0x1cf4
|
||||
#define MGA_FCOL 0x1c24
|
||||
#define MGA_FIFOSTATUS 0x1e10
|
||||
#define MGA_FOGCOL 0x1cf4
|
||||
#define MGA_FXBNDRY 0x1c84
|
||||
#define MGA_FXLEFT 0x1ca8
|
||||
#define MGA_FXLEFT 0x1ca8
|
||||
#define MGA_FXRIGHT 0x1cac
|
||||
|
||||
#define MGA_ICLEAR 0x1e18
|
||||
#define MGA_ICLEAR 0x1e18
|
||||
# define MGA_SOFTRAPICLR (1 << 0)
|
||||
# define MGA_VLINEICLR (1 << 5)
|
||||
#define MGA_IEN 0x1e1c
|
||||
#define MGA_IEN 0x1e1c
|
||||
# define MGA_SOFTRAPIEN (1 << 0)
|
||||
# define MGA_VLINEIEN (1 << 5)
|
||||
|
||||
#define MGA_LEN 0x1c5c
|
||||
#define MGA_LEN 0x1c5c
|
||||
|
||||
#define MGA_MACCESS 0x1c04
|
||||
|
||||
#define MGA_PITCH 0x1c8c
|
||||
#define MGA_PLNWT 0x1c1c
|
||||
#define MGA_PRIMADDRESS 0x1e58
|
||||
#define MGA_PITCH 0x1c8c
|
||||
#define MGA_PLNWT 0x1c1c
|
||||
#define MGA_PRIMADDRESS 0x1e58
|
||||
# define MGA_DMA_GENERAL (0 << 0)
|
||||
# define MGA_DMA_BLIT (1 << 0)
|
||||
# define MGA_DMA_VECTOR (2 << 0)
|
||||
|
@ -487,43 +487,43 @@ do { \
|
|||
# define MGA_PRIMPTREN0 (1 << 0)
|
||||
# define MGA_PRIMPTREN1 (1 << 1)
|
||||
|
||||
#define MGA_RST 0x1e40
|
||||
#define MGA_RST 0x1e40
|
||||
# define MGA_SOFTRESET (1 << 0)
|
||||
# define MGA_SOFTEXTRST (1 << 1)
|
||||
|
||||
#define MGA_SECADDRESS 0x2c40
|
||||
#define MGA_SECEND 0x2c44
|
||||
#define MGA_SETUPADDRESS 0x2cd0
|
||||
#define MGA_SETUPEND 0x2cd4
|
||||
#define MGA_SECADDRESS 0x2c40
|
||||
#define MGA_SECEND 0x2c44
|
||||
#define MGA_SETUPADDRESS 0x2cd0
|
||||
#define MGA_SETUPEND 0x2cd4
|
||||
#define MGA_SGN 0x1c58
|
||||
#define MGA_SOFTRAP 0x2c48
|
||||
#define MGA_SRCORG 0x2cb4
|
||||
#define MGA_SRCORG 0x2cb4
|
||||
# define MGA_SRMMAP_MASK (1 << 0)
|
||||
# define MGA_SRCMAP_FB (0 << 0)
|
||||
# define MGA_SRCMAP_SYSMEM (1 << 0)
|
||||
# define MGA_SRCACC_MASK (1 << 1)
|
||||
# define MGA_SRCACC_PCI (0 << 1)
|
||||
# define MGA_SRCACC_AGP (1 << 1)
|
||||
#define MGA_STATUS 0x1e14
|
||||
#define MGA_STATUS 0x1e14
|
||||
# define MGA_SOFTRAPEN (1 << 0)
|
||||
# define MGA_VSYNCPEN (1 << 4)
|
||||
# define MGA_VLINEPEN (1 << 5)
|
||||
# define MGA_DWGENGSTS (1 << 16)
|
||||
# define MGA_ENDPRDMASTS (1 << 17)
|
||||
#define MGA_STENCIL 0x2cc8
|
||||
#define MGA_STENCILCTL 0x2ccc
|
||||
#define MGA_STENCILCTL 0x2ccc
|
||||
|
||||
#define MGA_TDUALSTAGE0 0x2cf8
|
||||
#define MGA_TDUALSTAGE1 0x2cfc
|
||||
#define MGA_TEXBORDERCOL 0x2c5c
|
||||
#define MGA_TEXCTL 0x2c30
|
||||
#define MGA_TDUALSTAGE0 0x2cf8
|
||||
#define MGA_TDUALSTAGE1 0x2cfc
|
||||
#define MGA_TEXBORDERCOL 0x2c5c
|
||||
#define MGA_TEXCTL 0x2c30
|
||||
#define MGA_TEXCTL2 0x2c3c
|
||||
# define MGA_DUALTEX (1 << 7)
|
||||
# define MGA_G400_TC2_MAGIC (1 << 15)
|
||||
# define MGA_MAP1_ENABLE (1 << 31)
|
||||
#define MGA_TEXFILTER 0x2c58
|
||||
#define MGA_TEXHEIGHT 0x2c2c
|
||||
#define MGA_TEXORG 0x2c24
|
||||
#define MGA_TEXFILTER 0x2c58
|
||||
#define MGA_TEXHEIGHT 0x2c2c
|
||||
#define MGA_TEXORG 0x2c24
|
||||
# define MGA_TEXORGMAP_MASK (1 << 0)
|
||||
# define MGA_TEXORGMAP_FB (0 << 0)
|
||||
# define MGA_TEXORGMAP_SYSMEM (1 << 0)
|
||||
|
@ -534,45 +534,45 @@ do { \
|
|||
#define MGA_TEXORG2 0x2ca8
|
||||
#define MGA_TEXORG3 0x2cac
|
||||
#define MGA_TEXORG4 0x2cb0
|
||||
#define MGA_TEXTRANS 0x2c34
|
||||
#define MGA_TEXTRANSHIGH 0x2c38
|
||||
#define MGA_TEXWIDTH 0x2c28
|
||||
#define MGA_TEXTRANS 0x2c34
|
||||
#define MGA_TEXTRANSHIGH 0x2c38
|
||||
#define MGA_TEXWIDTH 0x2c28
|
||||
|
||||
#define MGA_WACCEPTSEQ 0x1dd4
|
||||
#define MGA_WCODEADDR 0x1e6c
|
||||
#define MGA_WFLAG 0x1dc4
|
||||
#define MGA_WFLAG1 0x1de0
|
||||
#define MGA_WACCEPTSEQ 0x1dd4
|
||||
#define MGA_WCODEADDR 0x1e6c
|
||||
#define MGA_WFLAG 0x1dc4
|
||||
#define MGA_WFLAG1 0x1de0
|
||||
#define MGA_WFLAGNB 0x1e64
|
||||
#define MGA_WFLAGNB1 0x1e08
|
||||
#define MGA_WFLAGNB1 0x1e08
|
||||
#define MGA_WGETMSB 0x1dc8
|
||||
#define MGA_WIADDR 0x1dc0
|
||||
#define MGA_WIADDR 0x1dc0
|
||||
#define MGA_WIADDR2 0x1dd8
|
||||
# define MGA_WMODE_SUSPEND (0 << 0)
|
||||
# define MGA_WMODE_RESUME (1 << 0)
|
||||
# define MGA_WMODE_JUMP (2 << 0)
|
||||
# define MGA_WMODE_START (3 << 0)
|
||||
# define MGA_WAGP_ENABLE (1 << 2)
|
||||
#define MGA_WMISC 0x1e70
|
||||
#define MGA_WMISC 0x1e70
|
||||
# define MGA_WUCODECACHE_ENABLE (1 << 0)
|
||||
# define MGA_WMASTER_ENABLE (1 << 1)
|
||||
# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
|
||||
#define MGA_WVRTXSZ 0x1dcc
|
||||
|
||||
#define MGA_YBOT 0x1c9c
|
||||
#define MGA_YDST 0x1c90
|
||||
#define MGA_YBOT 0x1c9c
|
||||
#define MGA_YDST 0x1c90
|
||||
#define MGA_YDSTLEN 0x1c88
|
||||
#define MGA_YDSTORG 0x1c94
|
||||
#define MGA_YTOP 0x1c98
|
||||
#define MGA_YTOP 0x1c98
|
||||
|
||||
#define MGA_ZORG 0x1c0c
|
||||
#define MGA_ZORG 0x1c0c
|
||||
|
||||
/* This finishes the current batch of commands
|
||||
*/
|
||||
#define MGA_EXEC 0x0100
|
||||
#define MGA_EXEC 0x0100
|
||||
|
||||
/* AGP PLL encoding (for G200 only).
|
||||
*/
|
||||
#define MGA_AGP_PLL 0x1e4c
|
||||
#define MGA_AGP_PLL 0x1e4c
|
||||
# define MGA_AGP2XPLL_DISABLE (0 << 0)
|
||||
# define MGA_AGP2XPLL_ENABLE (1 << 0)
|
||||
|
||||
|
|
|
@ -150,8 +150,8 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
|
|||
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
|
||||
DMA_LOCALS;
|
||||
|
||||
/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
|
||||
/* tex->texctl, tex->texctl2); */
|
||||
/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
|
||||
/* tex->texctl, tex->texctl2); */
|
||||
|
||||
BEGIN_DMA(6);
|
||||
|
||||
|
@ -190,8 +190,8 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
|
|||
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
|
||||
DMA_LOCALS;
|
||||
|
||||
/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
|
||||
/* tex->texctl, tex->texctl2); */
|
||||
/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
|
||||
/* tex->texctl, tex->texctl2); */
|
||||
|
||||
BEGIN_DMA(5);
|
||||
|
||||
|
@ -256,7 +256,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
|
|||
unsigned int pipe = sarea_priv->warp_pipe;
|
||||
DMA_LOCALS;
|
||||
|
||||
/* printk("mga_g400_emit_pipe %x\n", pipe); */
|
||||
/* printk("mga_g400_emit_pipe %x\n", pipe); */
|
||||
|
||||
BEGIN_DMA(10);
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
|
||||
/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
|
||||
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
|
||||
*/
|
||||
/*
|
||||
|
|
|
@ -493,7 +493,7 @@ do { \
|
|||
write * sizeof(u32) ); \
|
||||
} \
|
||||
if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
|
||||
DRM_ERROR( \
|
||||
DRM_ERROR( \
|
||||
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
|
||||
((dev_priv->ring.tail + _nr) & tail_mask), \
|
||||
write, __LINE__); \
|
||||
|
|
|
@ -486,7 +486,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
|
|||
if (cmd[0] & 0x8000) {
|
||||
u32 offset;
|
||||
|
||||
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
|
||||
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
|
||||
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
|
||||
offset = cmd[2] << 10;
|
||||
ret = !radeon_check_offset(dev_priv, offset);
|
||||
|
@ -504,7 +504,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
|
|||
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -723,54 +723,54 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
|
|||
u32 *ref_age_base;
|
||||
u32 i, buf_idx, h_pending;
|
||||
RING_LOCALS;
|
||||
|
||||
if (cmdbuf->bufsz <
|
||||
|
||||
if (cmdbuf->bufsz <
|
||||
(sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
if (header.scratch.reg >= 5) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
dev_priv->scratch_ages[header.scratch.reg]++;
|
||||
|
||||
|
||||
ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
|
||||
|
||||
|
||||
cmdbuf->buf += sizeof(u64);
|
||||
cmdbuf->bufsz -= sizeof(u64);
|
||||
|
||||
|
||||
for (i=0; i < header.scratch.n_bufs; i++) {
|
||||
buf_idx = *(u32 *)cmdbuf->buf;
|
||||
buf_idx *= 2; /* 8 bytes per buf */
|
||||
|
||||
|
||||
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
if (h_pending == 0) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
h_pending--;
|
||||
|
||||
|
||||
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
cmdbuf->buf += sizeof(buf_idx);
|
||||
cmdbuf->bufsz -= sizeof(buf_idx);
|
||||
}
|
||||
|
||||
|
||||
BEGIN_RING(2);
|
||||
OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
|
||||
OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
|
||||
ADVANCE_RING();
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -919,7 +919,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
|
|||
goto cleanup;
|
||||
}
|
||||
break;
|
||||
|
||||
|
||||
default:
|
||||
DRM_ERROR("bad cmd_type %i at %p\n",
|
||||
header.header.cmd_type,
|
||||
|
|
|
@ -853,13 +853,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
# define R300_TX_FORMAT_W8Z8Y8X8 0xC
|
||||
# define R300_TX_FORMAT_W2Z10Y10X10 0xD
|
||||
# define R300_TX_FORMAT_W16Z16Y16X16 0xE
|
||||
# define R300_TX_FORMAT_DXT1 0xF
|
||||
# define R300_TX_FORMAT_DXT3 0x10
|
||||
# define R300_TX_FORMAT_DXT5 0x11
|
||||
# define R300_TX_FORMAT_DXT1 0xF
|
||||
# define R300_TX_FORMAT_DXT3 0x10
|
||||
# define R300_TX_FORMAT_DXT5 0x11
|
||||
# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
|
||||
# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
|
||||
# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
|
||||
# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
|
||||
# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
|
||||
# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
|
||||
# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
|
||||
/* 0x16 - some 16 bit green format.. ?? */
|
||||
# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
|
||||
# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
|
||||
|
@ -867,19 +867,19 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
/* gap */
|
||||
/* Floating point formats */
|
||||
/* Note - hardware supports both 16 and 32 bit floating point */
|
||||
# define R300_TX_FORMAT_FL_I16 0x18
|
||||
# define R300_TX_FORMAT_FL_I16A16 0x19
|
||||
# define R300_TX_FORMAT_FL_I16 0x18
|
||||
# define R300_TX_FORMAT_FL_I16A16 0x19
|
||||
# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
|
||||
# define R300_TX_FORMAT_FL_I32 0x1B
|
||||
# define R300_TX_FORMAT_FL_I32A32 0x1C
|
||||
# define R300_TX_FORMAT_FL_I32 0x1B
|
||||
# define R300_TX_FORMAT_FL_I32A32 0x1C
|
||||
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
|
||||
/* alpha modes, convenience mostly */
|
||||
/* if you have alpha, pick constant appropriate to the
|
||||
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
|
||||
# define R300_TX_FORMAT_ALPHA_1CH 0x000
|
||||
# define R300_TX_FORMAT_ALPHA_2CH 0x200
|
||||
# define R300_TX_FORMAT_ALPHA_4CH 0x600
|
||||
# define R300_TX_FORMAT_ALPHA_NONE 0xA00
|
||||
# define R300_TX_FORMAT_ALPHA_1CH 0x000
|
||||
# define R300_TX_FORMAT_ALPHA_2CH 0x200
|
||||
# define R300_TX_FORMAT_ALPHA_4CH 0x600
|
||||
# define R300_TX_FORMAT_ALPHA_NONE 0xA00
|
||||
/* Swizzling */
|
||||
/* constants */
|
||||
# define R300_TX_FORMAT_X 0
|
||||
|
@ -1360,11 +1360,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
# define R300_RB3D_Z_DISABLED_2 0x00000014
|
||||
# define R300_RB3D_Z_TEST 0x00000012
|
||||
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
|
||||
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
|
||||
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
|
||||
|
||||
# define R300_RB3D_Z_TEST 0x00000012
|
||||
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
|
||||
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
|
||||
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
|
||||
# define R300_RB3D_STENCIL_ENABLE 0x00000001
|
||||
|
||||
#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
|
||||
|
|
|
@ -1127,7 +1127,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
|
|||
{
|
||||
u32 ring_start, cur_read_ptr;
|
||||
u32 tmp;
|
||||
|
||||
|
||||
/* Initialize the memory controller. With new memory map, the fb location
|
||||
* is not changed, it should have been properly initialized already. Part
|
||||
* of the problem is that the code below is bogus, assuming the GART is
|
||||
|
@ -1358,7 +1358,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
|
|||
return;
|
||||
}
|
||||
|
||||
tmp = RADEON_READ(RADEON_AIC_CNTL);
|
||||
tmp = RADEON_READ(RADEON_AIC_CNTL);
|
||||
|
||||
if (on) {
|
||||
RADEON_WRITE(RADEON_AIC_CNTL,
|
||||
|
@ -1583,7 +1583,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
|
||||
dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION)
|
||||
& 0xffff) << 16;
|
||||
dev_priv->fb_size =
|
||||
dev_priv->fb_size =
|
||||
((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000)
|
||||
- dev_priv->fb_location;
|
||||
|
||||
|
@ -1630,7 +1630,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
((base + dev_priv->gart_size) & 0xfffffffful) < base)
|
||||
base = dev_priv->fb_location
|
||||
- dev_priv->gart_size;
|
||||
}
|
||||
}
|
||||
dev_priv->gart_vm_start = base & 0xffc00000u;
|
||||
if (dev_priv->gart_vm_start != base)
|
||||
DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
|
||||
|
|
|
@ -223,10 +223,10 @@ typedef union {
|
|||
#define R300_CMD_CP_DELAY 5
|
||||
#define R300_CMD_DMA_DISCARD 6
|
||||
#define R300_CMD_WAIT 7
|
||||
# define R300_WAIT_2D 0x1
|
||||
# define R300_WAIT_3D 0x2
|
||||
# define R300_WAIT_2D_CLEAN 0x3
|
||||
# define R300_WAIT_3D_CLEAN 0x4
|
||||
# define R300_WAIT_2D 0x1
|
||||
# define R300_WAIT_3D 0x2
|
||||
# define R300_WAIT_2D_CLEAN 0x3
|
||||
# define R300_WAIT_3D_CLEAN 0x4
|
||||
#define R300_CMD_SCRATCH 8
|
||||
|
||||
typedef union {
|
||||
|
@ -722,7 +722,7 @@ typedef struct drm_radeon_surface_free {
|
|||
unsigned int address;
|
||||
} drm_radeon_surface_free_t;
|
||||
|
||||
#define DRM_RADEON_VBLANK_CRTC1 1
|
||||
#define DRM_RADEON_VBLANK_CRTC2 2
|
||||
#define DRM_RADEON_VBLANK_CRTC1 1
|
||||
#define DRM_RADEON_VBLANK_CRTC2 2
|
||||
|
||||
#endif
|
||||
|
|
|
@ -429,7 +429,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
#define RADEON_PCIE_INDEX 0x0030
|
||||
#define RADEON_PCIE_DATA 0x0034
|
||||
#define RADEON_PCIE_TX_GART_CNTL 0x10
|
||||
# define RADEON_PCIE_TX_GART_EN (1 << 0)
|
||||
# define RADEON_PCIE_TX_GART_EN (1 << 0)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1)
|
||||
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1)
|
||||
|
@ -439,7 +439,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8)
|
||||
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
|
||||
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
|
||||
#define RADEON_PCIE_TX_GART_BASE 0x13
|
||||
#define RADEON_PCIE_TX_GART_BASE 0x13
|
||||
#define RADEON_PCIE_TX_GART_START_LO 0x14
|
||||
#define RADEON_PCIE_TX_GART_START_HI 0x15
|
||||
#define RADEON_PCIE_TX_GART_END_LO 0x16
|
||||
|
@ -512,12 +512,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
|
|||
|
||||
#define RADEON_GEN_INT_STATUS 0x0044
|
||||
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
|
||||
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
|
||||
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
|
||||
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
|
||||
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
|
||||
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
|
||||
# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
|
||||
# define RADEON_SW_INT_TEST (1 << 25)
|
||||
# define RADEON_SW_INT_TEST_ACK (1 << 25)
|
||||
# define RADEON_SW_INT_TEST_ACK (1 << 25)
|
||||
# define RADEON_SW_INT_FIRE (1 << 26)
|
||||
|
||||
#define RADEON_HOST_PATH_CNTL 0x0130
|
||||
|
@ -1133,7 +1133,7 @@ do { \
|
|||
write, dev_priv->ring.tail ); \
|
||||
} \
|
||||
if (((dev_priv->ring.tail + _nr) & mask) != write) { \
|
||||
DRM_ERROR( \
|
||||
DRM_ERROR( \
|
||||
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
|
||||
((dev_priv->ring.tail + _nr) & mask), \
|
||||
write, __LINE__); \
|
||||
|
|
|
@ -512,7 +512,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
|
|||
DMA_DRAW_PRIMITIVE(count, prim, skip);
|
||||
|
||||
if (vb_stride == vtx_size) {
|
||||
DMA_COPY(&vtxbuf[vb_stride * start],
|
||||
DMA_COPY(&vtxbuf[vb_stride * start],
|
||||
vtx_size * count);
|
||||
} else {
|
||||
for (i = start; i < start + count; ++i) {
|
||||
|
@ -742,7 +742,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
|
|||
while (n != 0) {
|
||||
/* Can emit up to 255 vertices (85 triangles) at once. */
|
||||
unsigned int count = n > 255 ? 255 : n;
|
||||
|
||||
|
||||
/* Check indices */
|
||||
for (i = 0; i < count; ++i) {
|
||||
if (idx[i] > vb_size / (vb_stride * 4)) {
|
||||
|
@ -933,7 +933,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
|
|||
/* j was check in savage_bci_cmdbuf */
|
||||
ret = savage_dispatch_vb_idx(dev_priv,
|
||||
&cmd_header, (const uint16_t *)cmdbuf,
|
||||
(const uint32_t *)vtxbuf, vb_size,
|
||||
(const uint32_t *)vtxbuf, vb_size,
|
||||
vb_stride);
|
||||
cmdbuf += j;
|
||||
break;
|
||||
|
|
|
@ -249,7 +249,7 @@ int sis_idle(struct drm_device *dev)
|
|||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Implement a device switch here if needed
|
||||
*/
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
|
||||
*
|
||||
*
|
||||
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
@ -16,22 +16,22 @@
|
|||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Authors:
|
||||
* Thomas Hellstrom.
|
||||
* Partially based on code obtained from Digeo Inc.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Unmaps the DMA mappings.
|
||||
* FIXME: Is this a NoOp on x86? Also
|
||||
* FIXME: What happens if this one is called and a pending blit has previously done
|
||||
* the same DMA mappings?
|
||||
* Unmaps the DMA mappings.
|
||||
* FIXME: Is this a NoOp on x86? Also
|
||||
* FIXME: What happens if this one is called and a pending blit has previously done
|
||||
* the same DMA mappings?
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
|
@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
int num_desc = vsg->num_desc;
|
||||
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
|
||||
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
|
||||
drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
descriptor_this_page;
|
||||
dma_addr_t next = vsg->chain_start;
|
||||
|
||||
|
@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
if (descriptor_this_page-- == 0) {
|
||||
cur_descriptor_page--;
|
||||
descriptor_this_page = vsg->descriptors_per_page - 1;
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
|
||||
descriptor_this_page;
|
||||
}
|
||||
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
|
||||
|
@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
static void
|
||||
via_map_blit_for_device(struct pci_dev *pdev,
|
||||
const drm_via_dmablit_t *xfer,
|
||||
drm_via_sg_info_t *vsg,
|
||||
drm_via_sg_info_t *vsg,
|
||||
int mode)
|
||||
{
|
||||
unsigned cur_descriptor_page = 0;
|
||||
|
@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
|
||||
drm_via_descriptor_t *desc_ptr = NULL;
|
||||
|
||||
if (mode == 1)
|
||||
if (mode == 1)
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page];
|
||||
|
||||
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
|
||||
|
@ -118,24 +118,24 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
line_len = xfer->line_length;
|
||||
cur_fb = fb_addr;
|
||||
cur_mem = mem_addr;
|
||||
|
||||
|
||||
while (line_len > 0) {
|
||||
|
||||
remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
|
||||
line_len -= remaining_len;
|
||||
|
||||
if (mode == 1) {
|
||||
desc_ptr->mem_addr =
|
||||
dma_map_page(&pdev->dev,
|
||||
vsg->pages[VIA_PFN(cur_mem) -
|
||||
desc_ptr->mem_addr =
|
||||
dma_map_page(&pdev->dev,
|
||||
vsg->pages[VIA_PFN(cur_mem) -
|
||||
VIA_PFN(first_addr)],
|
||||
VIA_PGOFF(cur_mem), remaining_len,
|
||||
VIA_PGOFF(cur_mem), remaining_len,
|
||||
vsg->direction);
|
||||
desc_ptr->dev_addr = cur_fb;
|
||||
|
||||
|
||||
desc_ptr->size = remaining_len;
|
||||
desc_ptr->next = (uint32_t) next;
|
||||
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
|
||||
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
|
||||
DMA_TO_DEVICE);
|
||||
desc_ptr++;
|
||||
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
|
||||
|
@ -143,12 +143,12 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
num_desc++;
|
||||
cur_mem += remaining_len;
|
||||
cur_fb += remaining_len;
|
||||
}
|
||||
|
||||
|
||||
mem_addr += xfer->mem_stride;
|
||||
fb_addr += xfer->fb_stride;
|
||||
}
|
||||
|
@ -161,14 +161,14 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
}
|
||||
|
||||
/*
|
||||
* Function that frees up all resources for a blit. It is usable even if the
|
||||
* Function that frees up all resources for a blit. It is usable even if the
|
||||
* blit info has only been partially built as long as the status enum is consistent
|
||||
* with the actual status of the used resources.
|
||||
*/
|
||||
|
||||
|
||||
static void
|
||||
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
||||
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
||||
{
|
||||
struct page *page;
|
||||
int i;
|
||||
|
@ -185,7 +185,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
case dr_via_pages_locked:
|
||||
for (i=0; i<vsg->num_pages; ++i) {
|
||||
if ( NULL != (page = vsg->pages[i])) {
|
||||
if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
|
||||
if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
|
||||
SetPageDirty(page);
|
||||
page_cache_release(page);
|
||||
}
|
||||
|
@ -200,7 +200,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
|
|||
vsg->bounce_buffer = NULL;
|
||||
}
|
||||
vsg->free_on_sequence = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Fire a blit engine.
|
||||
|
@ -213,7 +213,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
|
|||
|
||||
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
|
||||
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
|
||||
VIA_DMA_CSR_DE);
|
||||
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
|
||||
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
|
||||
|
@ -233,9 +233,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
|
|||
{
|
||||
int ret;
|
||||
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
|
||||
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
|
||||
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
|
||||
first_pfn + 1;
|
||||
|
||||
|
||||
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
|
||||
return -ENOMEM;
|
||||
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
|
||||
|
@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
|
|||
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
if (ret != vsg->num_pages) {
|
||||
if (ret < 0)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
vsg->state = dr_via_pages_locked;
|
||||
return -EINVAL;
|
||||
|
@ -264,21 +264,21 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
|
|||
* quite large for some blits, and pages don't need to be contingous.
|
||||
*/
|
||||
|
||||
static int
|
||||
static int
|
||||
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
||||
vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
|
||||
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
|
||||
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
|
||||
vsg->descriptors_per_page;
|
||||
|
||||
if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
vsg->state = dr_via_desc_pages_alloc;
|
||||
for (i=0; i<vsg->num_desc_pages; ++i) {
|
||||
if (NULL == (vsg->desc_pages[i] =
|
||||
if (NULL == (vsg->desc_pages[i] =
|
||||
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
|
|||
vsg->num_desc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
via_abort_dmablit(struct drm_device *dev, int engine)
|
||||
{
|
||||
|
@ -300,7 +300,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
|
|||
{
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
|
||||
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
|
||||
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
|
||||
}
|
||||
|
||||
|
||||
|
@ -311,7 +311,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
|
|||
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
|
||||
* the workqueue task takes care of processing associated with the old blit.
|
||||
*/
|
||||
|
||||
|
||||
void
|
||||
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
||||
{
|
||||
|
@ -331,19 +331,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
|||
spin_lock_irqsave(&blitq->blit_lock, irqsave);
|
||||
}
|
||||
|
||||
done_transfer = blitq->is_active &&
|
||||
done_transfer = blitq->is_active &&
|
||||
(( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
|
||||
done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
|
||||
done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
|
||||
|
||||
cur = blitq->cur;
|
||||
if (done_transfer) {
|
||||
|
||||
blitq->blits[cur]->aborted = blitq->aborting;
|
||||
blitq->done_blit_handle++;
|
||||
DRM_WAKEUP(blitq->blit_queue + cur);
|
||||
DRM_WAKEUP(blitq->blit_queue + cur);
|
||||
|
||||
cur++;
|
||||
if (cur >= VIA_NUM_BLIT_SLOTS)
|
||||
if (cur >= VIA_NUM_BLIT_SLOTS)
|
||||
cur = 0;
|
||||
blitq->cur = cur;
|
||||
|
||||
|
@ -355,7 +355,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
|||
|
||||
blitq->is_active = 0;
|
||||
blitq->aborting = 0;
|
||||
schedule_work(&blitq->wq);
|
||||
schedule_work(&blitq->wq);
|
||||
|
||||
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
|
||||
|
||||
|
@ -367,7 +367,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
|||
blitq->aborting = 1;
|
||||
blitq->end = jiffies + DRM_HZ;
|
||||
}
|
||||
|
||||
|
||||
if (!blitq->is_active) {
|
||||
if (blitq->num_outstanding) {
|
||||
via_fire_dmablit(dev, blitq->blits[cur], engine);
|
||||
|
@ -383,14 +383,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
|
|||
}
|
||||
via_dmablit_engine_off(dev, engine);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (from_irq) {
|
||||
spin_unlock(&blitq->blit_lock);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
@ -426,13 +426,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
|
|||
|
||||
return active;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Sync. Wait for at least three seconds for the blit to be performed.
|
||||
*/
|
||||
|
||||
static int
|
||||
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
|
||||
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
|
||||
{
|
||||
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
|
||||
|
@ -441,12 +441,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
|
|||
int ret = 0;
|
||||
|
||||
if (via_dmablit_active(blitq, engine, handle, &queue)) {
|
||||
DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
|
||||
DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
|
||||
!via_dmablit_active(blitq, engine, handle, NULL));
|
||||
}
|
||||
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
|
||||
handle, engine, ret);
|
||||
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -468,12 +468,12 @@ via_dmablit_timer(unsigned long data)
|
|||
struct drm_device *dev = blitq->dev;
|
||||
int engine = (int)
|
||||
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
|
||||
|
||||
DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
|
||||
|
||||
DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
|
||||
(unsigned long) jiffies);
|
||||
|
||||
via_dmablit_handler(dev, engine, 0);
|
||||
|
||||
|
||||
if (!timer_pending(&blitq->poll_timer)) {
|
||||
mod_timer(&blitq->poll_timer, jiffies + 1);
|
||||
|
||||
|
@ -497,7 +497,7 @@ via_dmablit_timer(unsigned long data)
|
|||
*/
|
||||
|
||||
|
||||
static void
|
||||
static void
|
||||
via_dmablit_workqueue(struct work_struct *work)
|
||||
{
|
||||
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
|
||||
|
@ -505,38 +505,38 @@ via_dmablit_workqueue(struct work_struct *work)
|
|||
unsigned long irqsave;
|
||||
drm_via_sg_info_t *cur_sg;
|
||||
int cur_released;
|
||||
|
||||
|
||||
DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
|
||||
|
||||
|
||||
DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
|
||||
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
|
||||
|
||||
spin_lock_irqsave(&blitq->blit_lock, irqsave);
|
||||
|
||||
|
||||
while(blitq->serviced != blitq->cur) {
|
||||
|
||||
cur_released = blitq->serviced++;
|
||||
|
||||
DRM_DEBUG("Releasing blit slot %d\n", cur_released);
|
||||
|
||||
if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
|
||||
if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
|
||||
blitq->serviced = 0;
|
||||
|
||||
|
||||
cur_sg = blitq->blits[cur_released];
|
||||
blitq->num_free++;
|
||||
|
||||
|
||||
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
|
||||
|
||||
|
||||
DRM_WAKEUP(&blitq->busy_queue);
|
||||
|
||||
|
||||
via_free_sg_info(dev->pdev, cur_sg);
|
||||
kfree(cur_sg);
|
||||
|
||||
|
||||
spin_lock_irqsave(&blitq->blit_lock, irqsave);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
|
||||
}
/*
 * Init all blit engines. Currently we use two, but some hardware have 4.

@@ -550,8 +550,8 @@ via_init_dmablit(struct drm_device *dev)
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;

@@ -572,20 +572,20 @@ via_init_dmablit(struct drm_device *dev)
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
				(unsigned long)blitq);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

@@ -599,7 +599,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incoporate the
	 * extra logic of avoiding them. Make sure there are no.
	 * (Not a big limitation anyway.)
	 */

@@ -625,11 +625,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * we allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||

@@ -653,11 +653,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
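The checks above define the limits a blit request must satisfy: at most 2048 lines, no more than 2048*2048*4 bytes of system memory touched, a memory stride at least as long as the line length, and 16-byte alignment on the memory side plus 4-byte alignment on the framebuffer side for multi-line transfers. A hedged illustration of a pre-flight check a userspace client could run before submitting (the helper and its exact signature are illustrative, not part of the driver; field names follow the xfer->... members used above):

#include <stdint.h>
#include <stdbool.h>

/* Mirrors the driver-side limits shown above; illustrative only. */
static bool via_blit_args_ok(uint32_t num_lines, uint32_t line_length,
			     unsigned long mem_addr, uint32_t mem_stride,
			     uint32_t fb_addr, uint32_t fb_stride)
{
	if (num_lines > 2048 ||
	    (uint64_t)num_lines * mem_stride > 2048ULL * 2048 * 4)
		return false;
	if (mem_stride < line_length)
		return false;
	if ((mem_addr & 15) || (fb_addr & 3))
		return false;
	if (num_lines > 1 && ((mem_stride & 15) || (fb_stride & 3)))
		return false;
	return true;
}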
@@ -673,17 +673,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}

/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret=0;

@@ -698,10 +698,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
		if (ret) {
			return (-EINTR == ret) ? -EAGAIN : ret;
		}

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

@@ -712,7 +712,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

@@ -728,8 +728,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
 */

static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;

@@ -760,15 +760,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}
@@ -776,7 +776,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
/*
 * Sync on a previously submitted blit. Note that the X server use signals extensively, and
 * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */

@@ -786,7 +786,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

@@ -796,15 +796,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
	return err;
}

/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */

int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
	drm_via_dmablit_t *xfer = data;
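The two comments above spell out the userspace contract: both IOCTLs may return -EAGAIN when a signal arrives and are simply meant to be reissued. A hedged sketch of that retry loop from a client's point of view; the DRM_VIA_DMA_BLIT and DRM_VIA_BLIT_SYNC command indices, the copy-back of the sync member, and the libdrm drmCommandWrite/drmCommandWriteRead wrappers are assumptions from via_drm.h and libdrm, not code from this tree:

#include <string.h>
#include <xf86drm.h>   /* drmCommandWrite / drmCommandWriteRead (libdrm, assumed) */
#include "via_drm.h"   /* drm_via_dmablit_t, drm_via_blitsync_t (assumed layout) */

/* Queue one blit and wait for it, reissuing either command on -EAGAIN
 * as the driver comments above require.  Illustrative only. */
static int via_blit_and_wait(int fd, drm_via_dmablit_t *xfer)
{
	drm_via_blitsync_t sync;
	int err;

	do {
		err = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT,
					  xfer, sizeof(*xfer));
	} while (err == -EAGAIN);
	if (err)
		return err;

	/* The driver hands the wait handle back in xfer->sync. */
	memset(&sync, 0, sizeof(sync));
	sync.sync_handle = xfer->sync.sync_handle;
	sync.engine = xfer->sync.engine;

	do {
		err = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC,
				      &sync, sizeof(sync));
	} while (err == -EAGAIN);

	return err;
}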
@@ -1,5 +1,5 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright 2005 Thomas Hellstrom.
 * All Rights Reserved.
 *

@@ -17,12 +17,12 @@
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Thomas Hellstrom.
 * Register info from Digeo Inc.
 */

@@ -67,7 +67,7 @@ typedef struct _drm_via_blitq {
	unsigned cur;
	unsigned num_free;
	unsigned num_outstanding;
	unsigned long end;
	int aborting;
	int is_active;
	drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];

@@ -77,46 +77,46 @@ typedef struct _drm_via_blitq {
	struct work_struct wq;
	struct timer_list poll_timer;
} drm_via_blitq_t;


/*
 * PCI DMA Registers
 * Channels 2 & 3 don't seem to be implemented in hardware.
 */

#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */

#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */

#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */

#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */

#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */

#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */

#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */

/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
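The per-channel register banks above sit at a fixed 0x10 stride (MAR at +0x0, DAR at +0x4, BCR at +0x8, DPR at +0xC within each bank), while the Mode and Command/Status registers are packed at a 4-byte stride per channel. A small illustrative helper built only on the offsets listed above; these parameterized macros are not something the driver itself defines:

/* Channel register banks start at 0xE40 and repeat every 0x10 bytes. */
#define VIA_PCI_DMA_BANK(ch) (0xE40 + 0x10 * (ch))

#define VIA_PCI_DMA_MAR(ch) (VIA_PCI_DMA_BANK(ch) + 0x0) /* Memory Address */
#define VIA_PCI_DMA_DAR(ch) (VIA_PCI_DMA_BANK(ch) + 0x4) /* Device Address */
#define VIA_PCI_DMA_BCR(ch) (VIA_PCI_DMA_BANK(ch) + 0x8) /* Byte Count */
#define VIA_PCI_DMA_DPR(ch) (VIA_PCI_DMA_BANK(ch) + 0xC) /* Descriptor Pointer */

/* Mode and Command/Status registers, one word per channel. */
#define VIA_PCI_DMA_MR(ch)  (0xE80 + 4 * (ch))
#define VIA_PCI_DMA_CSR(ch) (0xE90 + 4 * (ch))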
@@ -35,7 +35,7 @@
#include "via_drmclient.h"
#endif

#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64

@@ -259,7 +259,7 @@ typedef struct drm_via_blitsync {
typedef struct drm_via_dmablit {
	uint32_t num_lines;
	uint32_t line_length;

	uint32_t fb_addr;
	uint32_t fb_stride;

@@ -71,7 +71,7 @@ static struct drm_driver driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,

@@ -121,4 +121,3 @@ int via_driver_unload(struct drm_device *dev)

	return 0;
}

@@ -113,7 +113,7 @@ void via_lastclose(struct drm_device *dev)
	dev_priv->vram_initialized = 0;
	dev_priv->agp_initialized = 0;
	mutex_unlock(&dev->struct_mutex);
}

int via_mem_alloc(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)