drm/vmwgfx: Add DRM driver for VMware Virtual GPU

This commit adds the vmwgfx driver for the VMware Virtual GPU, aka SVGA.
The driver is under staging, the same as the Nouveau and Radeon KMS drivers.
Hopefully the 2D ioctls are bug free and won't need changing, so that part
of the API should be stable. But there is a pretty big chance that the 3D
API will change in the future.

Signed-off-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 632f61178d
commit fb1d9738ca
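As a point of reference for the "stable 2D API" claim above, the driver-private
ioctls are reached like any other DRM command-range ioctl. A minimal userspace
sketch, assuming the drm_vmw_getparam_arg layout from vmwgfx_drm.h (the
DRM_VMW_PARAM_3D name below is an assumption for illustration; check
vmwgfx_drm.h for the real parameter ids):

	#include <sys/ioctl.h>
	#include "vmwgfx_drm.h"

	struct drm_vmw_getparam_arg arg = { 0 };
	arg.param = DRM_VMW_PARAM_3D;	/* hypothetical parameter id */
	if (ioctl(drm_fd, DRM_IOCTL_VMW_GET_PARAM, &arg) == 0)
		/* arg.value now holds the queried parameter */;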
drivers/gpu/drm/Makefile
@@ -30,4 +30,5 @@ obj-$(CONFIG_DRM_I830) += i830/
 obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_SIS) += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA) +=via/
drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,13 @@
config DRM_VMWGFX
	tristate "DRM driver for VMware Virtual GPU"
	depends on DRM && PCI
	select FB_DEFERRED_IO
	select FB_CFB_FILLRECT
	select FB_CFB_COPYAREA
	select FB_CFB_IMAGEBLIT
	select DRM_TTM
	help
	  KMS enabled DRM driver for SVGA2 virtual hardware.

	  If unsure say n. The compiled module will be
	  called vmwgfx.ko
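With the Kconfig entry in place, the driver is enabled like any other DRM
module; a kernel .config fragment would look like this (a sketch, assuming a
modular build):

	CONFIG_DRM=m
	CONFIG_DRM_TTM=m
	CONFIG_DRM_VMWGFX=m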
drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,9 @@

ccflags-y := -Iinclude/drm

vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
	    vmwgfx_overlay.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,229 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct vmw_ttm_backend {
	struct ttm_backend backend;
};

static int vmw_ttm_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page)
{
	return 0;
}

static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
	return 0;
}

static int vmw_ttm_unbind(struct ttm_backend *backend)
{
	return 0;
}

static void vmw_ttm_clear(struct ttm_backend *backend)
{
}

static void vmw_ttm_destroy(struct ttm_backend *backend)
{
	struct vmw_ttm_backend *vmw_be =
		container_of(backend, struct vmw_ttm_backend, backend);

	kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
	.populate = vmw_ttm_populate,
	.clear = vmw_ttm_clear,
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
	struct vmw_ttm_backend *vmw_be;

	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->backend.func = &vmw_ttm_func;

	return &vmw_be->backend;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->io_offset = dev_priv->vram_start;
		man->io_size = dev_priv->vram_size;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->io_addr = NULL;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

/**
 * FIXME: Proper access checks on buffers.
 */

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{
	return sync_obj;
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	*sync_obj = NULL;
}

static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	mutex_unlock(&dev_priv->hw_mutex);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
	uint32_t sequence = (unsigned long) sync_obj;

	return vmw_fence_signaled(dev_priv, sequence);
}

static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
			     bool lazy, bool interruptible)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
	uint32_t sequence = (unsigned long) sync_obj;

	return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}

struct ttm_bo_driver vmw_bo_driver = {
	.create_ttm_backend_entry = vmw_ttm_backend_init,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref
};
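Worth noting from the sync_obj hooks above: there is no fence object yet; the
32-bit fence sequence itself is carried through TTM's opaque void *sync_obj
and recovered with a cast. A minimal sketch of that round trip (illustrative
only, not driver code):

	/* Encode: stash a 32-bit sequence in the opaque pointer. */
	void *sync_obj = (void *)(unsigned long)sequence;

	/* Decode: recover it where TTM hands the pointer back. */
	uint32_t seq = (unsigned long)sync_obj;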
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,735 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_FIFO_DEBUG				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG,		\
		 struct drm_vmw_fifo_debug_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)


/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl, 0),

	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      0),

	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
		      0)
};
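/*
 * Illustrative note (not part of the driver): DRM_COMMAND_BASE is 0x40, so
 * VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, ...) places its entry at
 * vmw_ioctls[0], whereas the core DRM_IOCTL_DEF macro would index by the
 * full ioctl number and leave the first 0x40 array slots unused.
 */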

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};

static char *vmw_devname = "vmwgfx";

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	vmw_kms_save_vga(dev_priv);

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}

	return 0;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	vmw_kms_restore_vga(dev_priv);
}


static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	ida_init(&dev_priv->gmr_ida);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	atomic_set(&dev_priv->fence_queue_waiters, 0);
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->gmr_lru);

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	if (!dev->devname)
		dev->devname = vmw_devname;

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears that vesafb is loaded. "
			 "Ignore above error if any. Entering stealth mode.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
		vmw_kms_init(dev_priv);
		vmw_overlay_init(dev_priv);
	} else {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			goto out_no_device;
		vmw_kms_init(dev_priv);
		vmw_overlay_init(dev_priv);
		vmw_fb_init(dev_priv);
	}

	return 0;

out_no_device:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev->devname == vmw_devname)
		dev->devname = NULL;
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	ida_destroy(&dev_priv->gmr_ida);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");

	if (!dev_priv->stealth) {
		vmw_fb_close(dev_priv);
		vmw_kms_close(dev_priv);
		vmw_overlay_close(dev_priv);
		vmw_release_device(dev_priv);
		pci_release_regions(dev->pdev);
	} else {
		vmw_kms_close(dev_priv);
		vmw_overlay_close(dev_priv);
		pci_release_region(dev->pdev, 2);
	}
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev->devname == vmw_devname)
		dev->devname = NULL;
	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	ida_destroy(&dev_priv->gmr_ida);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	long ret;

	/*
	 * The driver private ioctls and TTM ioctls should be
	 * thread-safe.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
		return drm_ioctl(filp->f_path.dentry->d_inode,
				 filp, cmd, arg);
	}

	/*
	 * Not all old drm ioctls are thread-safe.
	 */

	lock_kernel();
	ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
	unlock_kernel();
	return ret;
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}

}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	DRM_INFO("Master create.\n");
	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	ttm_lock_init(&vmaster->lock);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	DRM_INFO("Master destroy.\n");
	master->driver_priv = NULL;
	kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	DRM_INFO("Master set.\n");
	if (dev_priv->stealth) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			return ret;
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	vmw_release_device(dev_priv);
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	DRM_INFO("Master drop.\n");

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (dev_priv->stealth) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_release_device(dev_priv);
	}
	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (!dev_priv->stealth)
		vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.reclaim_buffers_locked = NULL,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = vmw_unlocked_ioctl,
		 .mmap = vmw_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		 .compat_ioctl = drm_compat_ioctl,
#endif
		 },
	.pci_driver = {
		       .name = VMWGFX_DRIVER_NAME,
		       .id_table = vmw_pci_id_list,
		       .probe = vmw_probe,
		       .remove = vmw_remove
		       },
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_init(&driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_exit(&driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,511 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include "drmP.h"
#include "vmwgfx_drm.h"
#include "drm_hashtab.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_DATE "20090724"
#define VMWGFX_DRIVER_MAJOR 0
#define VMWGFX_DRIVER_MINOR 1
#define VMWGFX_DRIVER_PATCHLEVEL 2
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head validate_list;
	struct list_head gmr_lru;
	uint32_t gmr_id;
	bool gmr_bound;
	uint32_t cur_validate_node;
	bool on_validate_list;
};

struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	struct idr *idr;
	int id;
	enum ttm_object_type res_type;
	bool avail;
	void (*hw_destroy) (struct vmw_resource *res);
	void (*res_free) (struct vmw_resource *res);

	/* TODO is a generic snooper needed? */
#if 0
	void (*snoop)(struct vmw_resource *res,
		      struct ttm_object_file *tfile,
		      SVGA3dCmdHeader *header);
	void *snoop_priv;
#endif
};

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;

	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	__le32 *last_buffer;
	uint32_t last_data_size;
	uint32_t last_buffer_size;
	bool last_buffer_add;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct rw_semaphore rwsem;
};

struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};

struct vmw_sw_context {
	struct ida bo_list;
	uint32_t last_cid;
	bool cid_valid;
	uint32_t last_sid;
	bool sid_valid;
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
	uint32_t cur_val_buf;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct ttm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_depth;
	uint32_t vga_bpp;
	uint32_t vga_pseudo;
	uint32_t vga_red_mask;
	uint32_t vga_blue_mask;
	uint32_t vga_green_mask;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr context_idr;
	struct idr surface_idr;
	struct idr stream_idr;

	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	uint32_t fence_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	atomic_t fence_queue_waiters;
	atomic_t fifo_queue_waiters;
	uint32_t last_read_sequence;
	spinlock_t irq_lock;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	uint32_t val_seq;
	struct mutex cmdbuf_mutex;

	/**
	 * GMR management. Protected by the lru spinlock.
	 */

	struct ida gmr_ida;
	struct list_head gmr_lru;


	/**
	 * Operating mode.
	 */

	bool stealth;
	bool is_opened;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
};

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
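/*
 * Note on the two helpers above: SVGA register access is an index/value
 * port protocol -- the register number is written to VMWGFX_INDEX_PORT and
 * the value is then read from or written to VMWGFX_VALUE_PORT. Callers
 * serialize the two-step sequence with hw_mutex (see e.g. vmw_driver_load
 * and vmw_sync_obj_flush).
 */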

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			struct ttm_buffer_object *bo);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res));
extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   int sid, struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo);
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);


/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *sequence);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
			  uint32_t sequence, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
			       uint32_t sequence);
extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t sequence,
			     bool interruptible,
			     unsigned long timeout);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;
	struct ttm_buffer_object *bo = &tmp_buf->base;
	*buf = NULL;

	ttm_bo_unref(&bo);
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

#endif
@ -0,0 +1,516 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_reg.h"
|
||||
#include "ttm/ttm_bo_api.h"
|
||||
#include "ttm/ttm_placement.h"
|
||||
|
||||
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
SVGA3dCmdHeader *header)
|
||||
{
|
||||
return capable(CAP_SYS_ADMIN) ? : -EINVAL;
|
||||
}
|
||||
|
||||
static int vmw_cmd_ok(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
SVGA3dCmdHeader *header)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
SVGA3dCmdHeader *header)
|
||||
{
|
||||
struct vmw_cid_cmd {
|
||||
SVGA3dCmdHeader header;
|
||||
__le32 cid;
|
||||
} *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = container_of(header, struct vmw_cid_cmd, header);
|
||||
if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
|
||||
return 0;
|
||||
|
||||
ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
|
||||
if (unlikely(ret != 0)) {
|
||||
DRM_ERROR("Could not find or use context %u\n",
|
||||
(unsigned) cmd->cid);
|
||||
return ret;
|
||||
}
|
||||
|
||||
sw_context->last_cid = cmd->cid;
|
||||
sw_context->cid_valid = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
uint32_t sid)
|
||||
{
|
||||
if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
|
||||
sid != SVGA3D_INVALID_ID)) {
|
||||
int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
DRM_ERROR("Could ot find or use surface %u\n",
|
||||
(unsigned) sid);
|
||||
return ret;
|
||||
}
|
||||
|
||||
sw_context->last_sid = sid;
|
||||
sw_context->sid_valid = true;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
SVGA3dCmdHeader *header)
|
||||
{
|
||||
struct vmw_sid_cmd {
|
||||
SVGA3dCmdHeader header;
|
||||
SVGA3dCmdSetRenderTarget body;
|
||||
} *cmd;
|
||||
int ret;
|
||||
|
||||
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
cmd = container_of(header, struct vmw_sid_cmd, header);
|
||||
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
|
||||
}
|
||||
|
||||
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
SVGA3dCmdHeader *header)
|
||||
{
|
||||
struct vmw_sid_cmd {
|
||||
SVGA3dCmdHeader header;
|
||||
SVGA3dCmdSurfaceCopy body;
|
||||
} *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = container_of(header, struct vmw_sid_cmd, header);
|
||||
ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
|
||||
}
|
||||
|
||||
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
SVGA3dCmdHeader *header)
|
||||
{
|
||||
struct vmw_sid_cmd {
|
||||
SVGA3dCmdHeader header;
|
||||
SVGA3dCmdSurfaceStretchBlt body;
|
||||
} *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = container_of(header, struct vmw_sid_cmd, header);
|
||||
ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
|
||||
}
|
||||
|
||||
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
SVGA3dCmdHeader *header)
|
||||
{
|
||||
struct vmw_sid_cmd {
|
||||
SVGA3dCmdHeader header;
|
||||
SVGA3dCmdBlitSurfaceToScreen body;
|
||||
} *cmd;
|
||||
|
||||
cmd = container_of(header, struct vmw_sid_cmd, header);
|
||||
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
|
||||
}
|
||||
|
||||
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
}

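/*
 * Verify a SURFACE_DMA command: validate the surface id, look up the
 * guest-memory buffer backing the transfer, record a relocation for its
 * GMR pointer and add the buffer to the per-submission validation list
 * so it is reserved and fenced with the rest of the command stream.
 */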
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	uint32_t handle;
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	struct vmw_relocation *reloc;
	int ret;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;


	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
	if (unlikely(ret != 0))
		return ret;

	handle = cmd->dma.guest.ptr.gmrId;
	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of DMA commands per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = &cmd->dma.guest.ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}

	ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
				      cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
	vmw_surface_unreference(&srf);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}


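/*
 * Per-command verifier dispatch. Each SVGA3D command id maps to a check
 * function; ids the driver does not allow from user-space map to
 * vmw_cmd_invalid.
 */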
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

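/*
 * Verify a single command. 2D SVGA_CMD_UPDATE commands have a fixed size
 * of five 32-bit words; everything else is treated as an SVGA3D command
 * whose size comes from its header and whose id selects a checker from
 * the table above.
 */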
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf, uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		reloc->location->offset += bo->offset;
		reloc->location->gmrId = vmw_dmabuf_gmr(bo);
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;

	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);
}

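/*
 * Make a single buffer object resident: try to bind it to a GMR first,
 * and fall back to placing it in VRAM if no GMR binding can be set up.
 */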
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
		return 0;

	ret = vmw_gmr_bind(dev_priv, bo);
	if (likely(ret == 0 || ret == -ERESTART))
		return ret;


	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}


static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

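/*
 * Execbuf ioctl: copy the user command stream into reserved fifo space,
 * verify every command, reserve and validate all referenced buffers,
 * patch in the final GMR locations, commit the fifo and fence the
 * submission. The fence sequence is copied back to user-space.
 */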
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTART;
		goto out_no_cmd_mutex;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(cmd, user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_commit;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
				     dev_priv->val_seq++);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);
	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
	    (unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */

	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_commit:
	vmw_fifo_commit(dev_priv, 0);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
@ -0,0 +1,742 @@
/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
#include "ttm/ttm_placement.h"
|
||||
|
||||
#define VMW_DIRTY_DELAY (HZ / 30)
|
||||
|
||||
struct vmw_fb_par {
|
||||
struct vmw_private *vmw_priv;
|
||||
|
||||
void *vmalloc;
|
||||
|
||||
struct vmw_dma_buffer *vmw_bo;
|
||||
struct ttm_bo_kmap_obj map;
|
||||
|
||||
u32 pseudo_palette[17];
|
||||
|
||||
unsigned depth;
|
||||
unsigned bpp;
|
||||
|
||||
unsigned max_width;
|
||||
unsigned max_height;
|
||||
|
||||
void *bo_ptr;
|
||||
unsigned bo_size;
|
||||
bool bo_iowrite;
|
||||
|
||||
struct {
|
||||
spinlock_t lock;
|
||||
bool active;
|
||||
unsigned x1;
|
||||
unsigned y1;
|
||||
unsigned x2;
|
||||
unsigned y2;
|
||||
} dirty;
|
||||
};
|
||||
|
||||
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	/* without multimon it's hard to resize */
	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
	    (var->xres != par->max_width ||
	     var->yres != par->max_height)) {
		DRM_ERROR("Tried to resize, but we don't have multimon\n");
		return -EINVAL;
	}

	if (var->xres > par->max_width ||
	    var->yres > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

		/* TODO check if pitch and offset changes */

		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	} else {
		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);

		/* TODO check if pitch and offset changes */
	}

	return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

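/*
 * Copy the dirty region from the shadow (system-memory) framebuffer to
 * the VRAM-backed one and send an SVGA_CMD_UPDATE for that region, so
 * the host knows to refresh the display.
 */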
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}

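/*
 * Bring up the fbdev framebuffer: program an initial SVGA mode, allocate
 * a vmalloc'd shadow buffer plus a pinned VRAM buffer object, fill in
 * the fb_info fixed/variable data and register with the fb layer, with
 * deferred I/O driving the dirty flushes.
 */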
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
	} else {
		fb_width = min(vmw_priv->fb_max_width, initial_width);
		fb_height = min(vmw_priv->fb_max_height, initial_height);
	}

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);

	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
	DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
	DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
	DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
	DRM_DEBUG("fb_pitch %u\n", fb_pitch);
	DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}

int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
		spin_lock(&bo->glob->lru_lock);
		ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&bo->glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}

	ret = ttm_bo_validate(bo, &ne_placement, false, false);
	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}

int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_scheduled_work();

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}
@ -0,0 +1,521 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
|
||||
#include "drmP.h"
|
||||
#include "ttm/ttm_placement.h"
|
||||
|
||||
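/*
 * Initialize the command fifo: allocate the bounce buffers used when
 * fifo space cannot be reserved in place, enable the SVGA device,
 * program the fifo min/max/next/stop registers and read back the fifo
 * capabilities. Ends by sending an initial fence.
 */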
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;
	int ret;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->last_data_size = 0;
	fifo->last_buffer_add = false;
	fifo->last_buffer = vmalloc(fifo->last_buffer_size);
	if (unlikely(fifo->last_buffer == NULL)) {
		ret = -ENOMEM;
		goto out_err;
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	dev_priv->fence_seq = (uint32_t) -100;
	dev_priv->last_read_sequence = (uint32_t) -100;
	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

	return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
	vfree(fifo->static_buffer);
	fifo->static_buffer = NULL;
	return ret;
}

void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}

void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);

	mutex_unlock(&dev_priv->hw_mutex);

	if (likely(fifo->last_buffer != NULL)) {
		vfree(fifo->last_buffer);
		fifo->last_buffer = NULL;
	}

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

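/*
 * Fifo space check. next_cmd is the device's write pointer and stop its
 * read pointer within the [min, max) ring; the request does not fit when
 * the bytes left above the write pointer plus the bytes consumed below
 * the read pointer add up to no more than the requested size.
 */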
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTART;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == -ERESTARTSYS))
		ret = -ERESTART;
	else if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}

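/*
 * Reserve fifo space for a command. If the ring has a contiguous run of
 * free space (and the device supports SVGA_FIFO_CAP_RESERVE or the
 * request is a single word) a pointer straight into the fifo is
 * returned; otherwise a static or vmalloc'd bounce buffer is handed out
 * and copied in at commit time. May sleep waiting for fifo space.
 */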
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	down_write(&fifo_state->rwsem);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	up_write(&fifo_state->rwsem);
	return NULL;
}

static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}

static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	up_write(&fifo_state->rwsem);
}

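/*
 * Append an SVGA_CMD_FENCE command carrying the next non-zero sequence
 * number. If the device lacks SVGA_FIFO_CAP_FENCE the command is not
 * sent and the irq code emulates fence completion instead; if no fifo
 * space is available the caller is synced by a fallback wait.
 */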
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		down_write(&fifo_state->rwsem);
		*sequence = dev_priv->fence_seq;
		up_write(&fifo_state->rwsem);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*sequence = dev_priv->fence_seq++;
	} while (*sequence == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*sequence, &cmd_fence->fence);
	fifo_state->last_buffer_add = true;
	vmw_fifo_commit(dev_priv, bytes);
	fifo_state->last_buffer_add = false;

out_err:
	return ret;
}

/**
 * Map the first page of the FIFO read-only to user-space.
 */

static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;
	unsigned long address = (unsigned long)vmf->virtual_address;

	if (address != vma->vm_start)
		return VM_FAULT_SIGBUS;

	ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
	if (likely(ret == -EBUSY || ret == 0))
		return VM_FAULT_NOPAGE;
	else if (ret == -ENOMEM)
		return VM_FAULT_OOM;

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct vmw_fifo_vm_ops = {
	.fault = vmw_fifo_vm_fault,
	.open = NULL,
	.close = NULL
};

int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	file_priv = (struct drm_file *)filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);

	if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
	    (vma->vm_end - vma->vm_start) != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
					vma->vm_page_prot);
	vma->vm_ops = &vmw_fifo_vm_ops;
	return 0;
}
@ -0,0 +1,213 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
|
||||
#include "drmP.h"
|
||||
#include "ttm/ttm_bo_driver.h"
|
||||
|
||||
/**
|
||||
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
|
||||
* the number of used descriptors.
|
||||
*/
|
||||
|
||||
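/*
 * Build the hardware descriptor list for a GMR. Each descriptor page
 * holds an array of {ppn, num_pages} entries describing runs of
 * physically contiguous pages; the last entry of a page is reserved to
 * point to the next descriptor page, and a {0, 0} entry terminates the
 * list.
 */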
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
				     struct page *pages[],
				     unsigned long num_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;

	desc_per_page = PAGE_SIZE /
	    sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);

		/*
		 * Point previous page terminating descriptor to this
		 * page before unmapping it.
		 */

		if (likely(page_virtual != NULL)) {
			desc_virtual->ppn = page_to_pfn(page);
			kunmap_atomic(page_virtual, KM_USER0);
		}

		page_virtual = kmap_atomic(page, KM_USER0);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = page_to_pfn(*pages);

			if (pfn != prev_pfn + 1) {

				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
				    le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			++pages;
		}

		(++desc_virtual)->ppn = cpu_to_le32(0);
		desc_virtual->num_pages = cpu_to_le32(0);
	}

	if (likely(page_virtual != NULL))
		kunmap_atomic(page_virtual, KM_USER0);

	return 0;
out_err:
	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return ret;
}

static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
}

static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, struct list_head *desc_pages)
{
	struct page *page;

	if (unlikely(list_empty(desc_pages)))
		return;

	page = list_entry(desc_pages->next, struct page, lru);

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
	mb();

	mutex_unlock(&dev_priv->hw_mutex);
}

/**
|
||||
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
|
||||
* the number of used descriptors.
|
||||
*/
|
||||
|
||||
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
|
||||
unsigned long num_pages)
|
||||
{
|
||||
unsigned long prev_pfn = ~(0UL);
|
||||
unsigned long pfn;
|
||||
unsigned long descriptors = 0;
|
||||
|
||||
while (num_pages--) {
|
||||
pfn = page_to_pfn(*pages++);
|
||||
if (prev_pfn + 1 != pfn)
|
||||
++descriptors;
|
||||
prev_pfn = pfn;
|
||||
}
|
||||
|
||||
return descriptors;
|
||||
}
|
||||
|
||||
int vmw_gmr_bind(struct vmw_private *dev_priv,
|
||||
struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
unsigned long descriptors;
|
||||
int ret;
|
||||
uint32_t id;
|
||||
struct list_head desc_pages;
|
||||
|
||||
if (!(dev_priv->capabilities & SVGA_CAP_GMR))
|
||||
return -EINVAL;
|
||||
|
||||
ret = ttm_tt_populate(ttm);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
|
||||
if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
|
||||
return -EINVAL;
|
||||
|
||||
INIT_LIST_HEAD(&desc_pages);
|
||||
ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
|
||||
ttm->num_pages);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
ret = vmw_gmr_id_alloc(dev_priv, &id);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_id;
|
||||
|
||||
vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
|
||||
vmw_gmr_free_descriptors(&desc_pages);
|
||||
vmw_dmabuf_set_gmr(bo, id);
|
||||
return 0;
|
||||
|
||||
out_no_id:
|
||||
vmw_gmr_free_descriptors(&desc_pages);
|
||||
return ret;
|
||||
}
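
/*
 * A hedged usage sketch, not taken from this patch: a caller that has
 * validated a TTM buffer could try to back it with a GMR and fall back
 * to another placement when binding fails. vmw_move_to_vram() is a
 * hypothetical helper here; the driver's real fallback path lives in
 * its buffer placement code.
 *
 *	if (vmw_gmr_bind(dev_priv, bo) != 0) {
 *		// No GMR capability, too many descriptors or no free id:
 *		// keep the buffer where TTM placed it instead.
 *		ret = vmw_move_to_vram(dev_priv, bo);
 *	}
 */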

void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}

@@ -0,0 +1,81 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"

int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
		break;
	case DRM_VMW_PARAM_FIFO_OFFSET:
		param->value = dev_priv->mmio_start;
		break;
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
		return -EINVAL;
	}

	return 0;
}
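
/*
 * Illustration only: a hedged sketch of how userspace might query one
 * of these parameters through libdrm. The command index name is an
 * assumption here; the argument struct matches drm_vmw_getparam_arg
 * above.
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *				&arg, sizeof(arg)) == 0)
 *		have_3d = arg.value != 0;
 */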

int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct drm_vmw_fifo_debug_arg *arg =
	    (struct drm_vmw_fifo_debug_arg *)data;
	__le32 __user *buffer = (__le32 __user *)
	    (unsigned long)arg->debug_buffer;

	if (unlikely(fifo_state->last_buffer == NULL))
		return -EINVAL;

	if (arg->debug_buffer_size < fifo_state->last_data_size) {
		arg->used_size = arg->debug_buffer_size;
		arg->did_not_fit = 1;
	} else {
		arg->used_size = fifo_state->last_data_size;
		arg->did_not_fit = 0;
	}

	/* copy_to_user() returns the number of bytes left, not an errno. */
	if (copy_to_user(buffer, fifo_state->last_buffer, arg->used_size))
		return -EFAULT;

	return 0;
}

@@ -0,0 +1,295 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	spin_unlock(&dev_priv->irq_lock);

	if (status & SVGA_IRQFLAG_ANY_FENCE)
		wake_up_all(&dev_priv->fence_queue);
	if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if (likely(status)) {
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
{
	uint32_t busy;

	mutex_lock(&dev_priv->hw_mutex);
	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
	mutex_unlock(&dev_priv->hw_mutex);

	return (busy == 0);
}

bool vmw_fence_signaled(struct vmw_private *dev_priv,
			uint32_t sequence)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, sequence))
		return true;

	/**
	 * The code below signals stale fences that have wrapped.
	 * First, block fence submission.
	 */

	down_read(&fifo_state->rwsem);

	/**
	 * Then check whether the sequence is higher than what we've
	 * actually emitted; if so, the fence is stale and signaled.
	 */

	ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
	up_read(&fifo_state->rwsem);

	return ret;
}
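
/*
 * Worked example of the unsigned wrap test above, with 32-bit sequence
 * numbers and VMW_FENCE_WRAP == 1 << 24:
 *
 *	last_read_sequence = 0x00000005, sequence = 0x00000003:
 *	5 - 3 = 2 < VMW_FENCE_WRAP, so the fence is signaled.
 *
 *	last_read_sequence = 0x00000005, sequence = 0xfffffff0:
 *	5 - 0xfffffff0 wraps to 0x15 < VMW_FENCE_WRAP, so a fence emitted
 *	just before the counter wrapped is also treated as signaled.
 */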

int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t sequence,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
	    &vmw_fence_signaled;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = dev_priv->fence_seq;
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, sequence))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hrtimeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTART;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}

int vmw_wait_fence(struct vmw_private *dev_priv,
		   bool lazy, uint32_t sequence,
		   bool interruptible, unsigned long timeout)
{
	long ret;
	unsigned long irq_flags;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_fence_signaled(dev_priv, sequence)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
					 interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);

	if (unlikely(ret == -ERESTARTSYS))
		ret = -ERESTART;
	else if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}

void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
	mutex_unlock(&dev_priv->hw_mutex);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)

int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
	}

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
		return -EBUSY;

	timeout = (unsigned long)arg->kernel_cookie - timeout;
	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
}
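
/*
 * Worked example of the cookie logic above, assuming HZ == 1000. On the
 * first call at jiffies == 10000, kernel_cookie becomes 13000 and the
 * wait may block for up to 3000 jiffies. If userspace restarts the
 * ioctl at jiffies == 12500 with the same cookie, only the remaining
 * 500 jiffies are waited; at jiffies >= 13000 the call returns -EBUSY
 * instead of waiting another full timeout.
 */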

@@ -0,0 +1,872 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
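
/*
 * Illustration only: updating the cursor amounts to pushing one FIFO
 * command followed by the raw 32-bit ARGB image. A caller with a 64x64
 * pixel buffer (the size the snooping code below assumes) might do:
 *
 *	u32 argb[64 * 64];
 *
 *	fill_cursor_pixels(argb);	(hypothetical helper)
 *	vmw_cursor_update_image(dev_priv, argb, 64, 64, hot_x, hot_y);
 *	vmw_cursor_update_position(dev_priv, true, x, y);
 */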

void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup(dev_priv, tfile,
					      handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}

int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never be != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
	    sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64 || box->h != 64 || box->d != 1 ||
	    box_count != 1) {
		/* TODO handle non page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy (size != 64) */
		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* we can't update the cursor image from this function, since
	 * execbuf has reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
};

void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfb =
	    vmw_framebuffer_to_vfbs(framebuffer);

	cancel_delayed_work_sync(&vfb->d_work);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfb->surface);

	kfree(framebuffer);
}

static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
	    container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
	    container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/**
	 * Will not re-add if already pending.
	 */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}

int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
	    vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		int ret;

		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/**
			 * No work was pending; force an immediate present.
			 */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) +
			       (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) +
				       num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) +
			(num_clips - 1) * sizeof(cmd->cr));

	return 0;
}
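
/*
 * Worked example of the DRM_MODE_FB_DIRTY_ANNOTATE_COPY handling above:
 * with that flag userspace passes clip rects in (destination, source)
 * pairs, so for num_clips == 4 the array is {dst0, src0, dst1, src1}.
 * Halving num_clips to 2 and stepping by inc == 2 visits only dst0 and
 * dst1; the source rects can be skipped because a present always copies
 * from the same coordinates it presents to.
 */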

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
				    struct vmw_surface *surface,
				    struct vmw_framebuffer **out,
				    unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	int ret;

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = 32;
	vfbs->base.base.pitch = width * 32 / 8;
	vfbs->base.base.depth = 24;
	vfbs->base.base.width = width;
	vfbs->base.base.height = height;
	vfbs->base.pin = NULL;
	vfbs->base.unpin = NULL;
	vfbs->surface = surface;
	mutex_init(&vfbs->work_lock);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
	    vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}

int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct drm_clip_rect norect;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	/*
	 * The clips pointer already advances by "increment" each
	 * iteration, so dereference it directly; indexing it with i on
	 * top of that would skip every other clip rect.
	 */
	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
	    vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
		vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
		vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
		vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL,
			  vfb->base.bits_per_pixel);
		vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
		vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
	} else
		WARN_ON(true);

	vmw_overlay_resume_all(dev_priv);

	return 0;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
	    vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}

int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *dmabuf,
				   struct vmw_framebuffer **out,
				   unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	int ret;

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbd->base.base.bits_per_pixel = 32;
	vfbd->base.base.pitch = width * 32 / 8;
	vfbd->base.base.depth = 24;
	vfbd->base.base.width = width;
	vfbd->base.base.height = height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}

/*
 * Generic kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	int ret;

	ret = vmw_user_surface_lookup(dev_priv, tfile,
				      mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
					      mode_cmd->width,
					      mode_cmd->height);

	/* vmw_user_surface_lookup takes one ref, and so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return NULL;
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd->width,
					     mode_cmd->height);

	/* vmw_user_dmabuf_lookup takes one ref, and so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}

	return &vfb->base;
}

static int vmw_kms_fb_changed(struct drm_device *dev)
{
	return 0;
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.fb_changed = vmw_kms_fb_changed,
};

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 640;
	dev->mode_config.min_height = 480;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	ret = vmw_kms_init_legacy_display_system(dev_priv);

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * The docs say we should take the mode config lock before calling
	 * this function, but since it destroys encoders, and our destructor
	 * calls drm_encoder_cleanup, which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	/*
	 * Set up a single multimon monitor with a size of 0x0.
	 * This stops the UI from resizing when we change the
	 * framebuffer size.
	 */
	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);

	/* TODO check for multimon */
	vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);

	return 0;
}

@@ -0,0 +1,102 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_

#include "drmP.h"
#include "vmwgfx_drv.h"

#define vmw_framebuffer_to_vfb(x) \
	container_of(x, struct vmw_framebuffer, base)

/**
 * Base class for framebuffers
 *
 * @pin is called whenever a crtc starts scanning out from this framebuffer.
 * @unpin is called when no crtc scans out from this framebuffer any longer.
 */
struct vmw_framebuffer {
	struct drm_framebuffer base;
	int (*pin)(struct vmw_framebuffer *fb);
	int (*unpin)(struct vmw_framebuffer *fb);
};

#define vmw_crtc_to_du(x) \
	container_of(x, struct vmw_display_unit, crtc)

/*
 * Basic cursor manipulation
 */
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y);

/**
 * Base class display unit.
 *
 * Since the SVGA hw doesn't have a concept of a crtc, encoder or
 * connector, the display unit is all of them at the same time. This is
 * true for both legacy multimon and screen objects.
 */
struct vmw_display_unit {
	struct drm_crtc crtc;
	struct drm_encoder encoder;
	struct drm_connector connector;

	struct vmw_surface *cursor_surface;
	struct vmw_dma_buffer *cursor_dmabuf;
	size_t cursor_age;

	int cursor_x;
	int cursor_y;

	int hotspot_x;
	int hotspot_y;

	unsigned unit;
};

/*
 * Shared display unit functions - vmwgfx_kms.c
 */
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);

/*
 * Legacy display unit functions - vmwgfx_ldu.c
 */
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);

#endif

@@ -0,0 +1,516 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

#define vmw_crtc_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.encoder)
#define vmw_connector_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.connector)

struct vmw_legacy_display {
	struct list_head active;

	unsigned num_active;

	struct vmw_framebuffer *fb;
};

/**
 * Display unit using the legacy register interface.
 */
struct vmw_legacy_display_unit {
	struct vmw_display_unit base;

	struct list_head active;

	unsigned unit;
};

static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
	list_del_init(&ldu->active);
	vmw_display_unit_cleanup(&ldu->base);
	kfree(ldu);
}

/*
 * Legacy Display Unit CRTC functions
 */

static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
{
}

static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
{
}

static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
				   u16 *r, u16 *g, u16 *b,
				   uint32_t size)
{
}

static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
}

static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct drm_crtc *crtc;
	int i = 0;

	/* To stop the screen from changing size on resize */
	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
	for (i = 0; i < lds->num_active; i++) {
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* Now set the mode */
	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
	i = 0;
	list_for_each_entry(entry, &lds->active, active) {
		crtc = &entry->base.crtc;

		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		i++;
	}

	return 0;
}

static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;

	if (list_empty(&ldu->active))
		return 0;

	list_del_init(&ldu->active);
	if (--(ld->num_active) == 0) {
		BUG_ON(!ld->fb);
		if (ld->fb->unpin)
			ld->fb->unpin(ld->fb);
		ld->fb = NULL;
	}

	return 0;
}

static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu,
			      struct vmw_framebuffer *vfb)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct list_head *at;

	if (!list_empty(&ldu->active))
		return 0;

	at = &ld->active;
	list_for_each_entry(entry, &ld->active, active) {
		if (entry->unit > ldu->unit)
			break;

		at = &entry->active;
	}

	list_add(&ldu->active, at);
	if (ld->num_active++ == 0) {
		BUG_ON(ld->fb);
		if (vfb->pin)
			vfb->pin(vfb);
		ld->fb = vfb;
	}

	return 0;
}
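
/*
 * Worked example of the insertion above: the active list is kept sorted
 * by unit number. With units {0, 2} active, adding unit 1 walks the
 * list, stops at unit 2 (since 2 > 1) and inserts after unit 0, giving
 * {0, 1, 2}. vmw_ldu_commit_list() then assigns SVGA display ids in
 * list order, so display id 0 always goes to the lowest active unit.
 */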

static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_legacy_display_unit *ldu;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_crtc *crtc;

	if (!set)
		return -EINVAL;

	if (!set->crtc)
		return -EINVAL;

	/* get the ldu */
	crtc = set->crtc;
	ldu = vmw_crtc_to_ldu(crtc);
	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
	dev_priv = vmw_priv(crtc->dev);

	if (set->num_connectors > 1) {
		DRM_ERROR("too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &ldu->base.connector) {
		DRM_ERROR("connector doesn't match %p %p\n",
			  set->connectors[0], &ldu->base.connector);
		return -EINVAL;
	}

	/* ldu only supports one fb active at a time */
	if (dev_priv->ldu_priv->fb && vfb &&
	    dev_priv->ldu_priv->fb != vfb) {
		DRM_ERROR("Multiple framebuffers not supported\n");
		return -EINVAL;
	}

	/* since they always map one to one these are safe */
	connector = &ldu->base.connector;
	encoder = &ldu->base.encoder;

	/* should we turn the crtc off? */
	if (set->num_connectors == 0 || !set->mode || !set->fb) {

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->fb = NULL;

		vmw_ldu_del_active(dev_priv, ldu);

		vmw_ldu_commit_list(dev_priv);

		return 0;
	}

	/* we now know we want to set a mode */
	mode = set->mode;
	fb = set->fb;

	if (set->x + mode->hdisplay > fb->width ||
	    set->y + mode->vdisplay > fb->height) {
		DRM_ERROR("set outside of framebuffer\n");
		return -EINVAL;
	}

	vmw_fb_off(dev_priv);

	crtc->fb = fb;
	encoder->crtc = crtc;
	connector->encoder = encoder;
	crtc->x = set->x;
	crtc->y = set->y;
	crtc->mode = *mode;

	vmw_ldu_add_active(dev_priv, ldu, vfb);

	vmw_ldu_commit_list(dev_priv);

	return 0;
}

static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
	.save = vmw_ldu_crtc_save,
	.restore = vmw_ldu_crtc_restore,
	.cursor_set = vmw_du_crtc_cursor_set,
	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_ldu_crtc_gamma_set,
	.destroy = vmw_ldu_crtc_destroy,
	.set_config = vmw_ldu_crtc_set_config,
};

/*
 * Legacy Display Unit encoder functions
 */

static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
}

static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
	.destroy = vmw_ldu_encoder_destroy,
};

/*
 * Legacy Display Unit connector functions
 */

static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
{
}

static void vmw_ldu_connector_save(struct drm_connector *connector)
{
}

static void vmw_ldu_connector_restore(struct drm_connector *connector)
{
}

static enum drm_connector_status
vmw_ldu_connector_detect(struct drm_connector *connector)
{
	/* XXX vmwctrl should control connection status */
	if (vmw_connector_to_ldu(connector)->base.unit == 0)
		return connector_status_connected;
	return connector_status_disconnected;
}
|
||||
|
||||
static struct drm_display_mode vmw_ldu_connector_builtin[] = {
|
||||
/* 640x480@60Hz */
|
||||
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
|
||||
752, 800, 0, 480, 489, 492, 525, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
|
||||
/* 800x600@60Hz */
|
||||
{ DRM_MODE("800x600",
|
||||
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
|
||||
40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
|
||||
0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1024x768@60Hz */
|
||||
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
|
||||
1184, 1344, 0, 768, 771, 777, 806, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
|
||||
/* 1152x864@75Hz */
|
||||
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
|
||||
1344, 1600, 0, 864, 865, 868, 900, 0,
|
||||
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1280x768@60Hz */
|
||||
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
|
||||
1472, 1664, 0, 768, 771, 778, 798, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1280x800@60Hz */
|
||||
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
|
||||
1480, 1680, 0, 800, 803, 809, 831, 0,
|
||||
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
|
||||
/* 1280x960@60Hz */
|
||||
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
|
||||
1488, 1800, 0, 960, 961, 964, 1000, 0,
|
||||
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1280x1024@60Hz */
|
||||
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
|
||||
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
|
||||
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1360x768@60Hz */
|
||||
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
|
||||
1536, 1792, 0, 768, 771, 777, 795, 0,
|
||||
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1440x1050@60Hz */
|
||||
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
|
||||
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1440x900@60Hz */
|
||||
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
|
||||
1672, 1904, 0, 900, 903, 909, 934, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1600x1200@60Hz */
|
||||
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
|
||||
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
|
||||
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1680x1050@60Hz */
|
||||
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
|
||||
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1792x1344@60Hz */
|
||||
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
|
||||
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1853x1392@60Hz */
|
||||
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
|
||||
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1920x1200@60Hz */
|
||||
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
|
||||
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 1920x1440@60Hz */
|
||||
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
|
||||
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* 2560x1600@60Hz */
|
||||
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
|
||||
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
|
||||
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
|
||||
/* Terminate */
|
||||
{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
|
||||
};
|
||||
|
||||
static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
|
||||
uint32_t max_width, uint32_t max_height)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_display_mode *mode = NULL;
|
||||
int i;
|
||||
|
||||
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
|
||||
if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
|
||||
vmw_ldu_connector_builtin[i].vdisplay > max_height)
|
||||
continue;
|
||||
|
||||
mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
|
||||
if (!mode)
|
||||
return 0;
|
||||
mode->vrefresh = drm_mode_vrefresh(mode);
|
||||
|
||||
drm_mode_probed_add(connector, mode);
|
||||
}
|
||||
|
||||
drm_mode_connector_list_update(connector);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int vmw_ldu_connector_set_property(struct drm_connector *connector,
|
||||
struct drm_property *property,
|
||||
uint64_t val)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vmw_ldu_connector_destroy(struct drm_connector *connector)
|
||||
{
|
||||
vmw_ldu_destroy(vmw_connector_to_ldu(connector));
|
||||
}
|
||||
|
||||
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
|
||||
.dpms = vmw_ldu_connector_dpms,
|
||||
.save = vmw_ldu_connector_save,
|
||||
.restore = vmw_ldu_connector_restore,
|
||||
.detect = vmw_ldu_connector_detect,
|
||||
.fill_modes = vmw_ldu_connector_fill_modes,
|
||||
.set_property = vmw_ldu_connector_set_property,
|
||||
.destroy = vmw_ldu_connector_destroy,
|
||||
};
|
||||
|
||||
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
|
||||
{
|
||||
struct vmw_legacy_display_unit *ldu;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_connector *connector;
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
|
||||
if (!ldu)
|
||||
return -ENOMEM;
|
||||
|
||||
ldu->unit = unit;
|
||||
crtc = &ldu->base.crtc;
|
||||
encoder = &ldu->base.encoder;
|
||||
connector = &ldu->base.connector;
|
||||
|
||||
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_LVDS);
|
||||
/* Initial status */
|
||||
if (unit == 0)
|
||||
connector->status = connector_status_connected;
|
||||
else
|
||||
connector->status = connector_status_disconnected;
|
||||
|
||||
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
|
||||
DRM_MODE_ENCODER_LVDS);
|
||||
drm_mode_connector_attach_encoder(connector, encoder);
|
||||
encoder->possible_crtcs = (1 << unit);
|
||||
encoder->possible_clones = 0;
|
||||
|
||||
INIT_LIST_HEAD(&ldu->active);
|
||||
|
||||
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
|
||||
|
||||
drm_connector_attach_property(connector,
|
||||
dev->mode_config.dirty_info_property,
|
||||
1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
|
||||
{
|
||||
if (dev_priv->ldu_priv) {
|
||||
DRM_INFO("ldu system already on\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
|
||||
|
||||
if (!dev_priv->ldu_priv)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
|
||||
dev_priv->ldu_priv->num_active = 0;
|
||||
dev_priv->ldu_priv->fb = NULL;
|
||||
|
||||
drm_mode_create_dirty_info_property(dev_priv->dev);
|
||||
|
||||
vmw_ldu_init(dev_priv, 0);
|
||||
vmw_ldu_init(dev_priv, 1);
|
||||
vmw_ldu_init(dev_priv, 2);
|
||||
vmw_ldu_init(dev_priv, 3);
|
||||
vmw_ldu_init(dev_priv, 4);
|
||||
vmw_ldu_init(dev_priv, 5);
|
||||
vmw_ldu_init(dev_priv, 6);
|
||||
vmw_ldu_init(dev_priv, 7);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
|
||||
{
|
||||
if (!dev_priv->ldu_priv)
|
||||
return -ENOSYS;
|
||||
|
||||
BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
|
||||
|
||||
kfree(dev_priv->ldu_priv);
|
||||
|
||||
return 0;
|
||||
}
|
|
@@ -0,0 +1,634 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#include "svga_overlay.h"
#include "svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1

struct vmw_stream {
	struct vmw_dma_buffer *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	return dev_priv ? dev_priv->overlay_priv : NULL;
}

struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};

static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}

/**
 * Pin or unpin a buffer in vram.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin or unpin.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Takes the current master's ttm lock in read mode.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool pin, bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_placement *overlay_placement = &vmw_vram_placement;
	int ret;

	ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	if (buf->gmr_bound) {
		vmw_gmr_unbind(dev_priv, buf->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
		spin_unlock(&glob->lru_lock);
		buf->gmr_bound = false;
	}

	if (pin)
		overlay_placement = &vmw_vram_ne_placement;

	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->active_master->lock);

	return ret;
}

/**
 * Send put command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		struct {
			struct {
				uint32_t cmdType;
				uint32_t streamId;
			} header;
			struct {
				uint32_t registerId;
				uint32_t value;
			} items[SVGA_VIDEO_PITCH_3 + 1];
		} body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	uint32_t offset;
	int i, ret;

	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = arg->stream_id;

	for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
		cmds->body.items[i].registerId = i;

	offset = buf->base.offset + arg->offset;

	cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
	cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
	cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
	cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
	cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
	cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
	cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
	cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
	cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
	cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
	cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
	cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
	cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
	cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
	cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
	cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
	cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
	cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
	cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];

	fill_flush(&cmds->flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Send stop command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;
	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused, the no-evict flag is removed from the buffer
 * but the buffer is left in vram. This allows, for instance, mode_set to
 * evict it should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* With no buffer attached, the stream is completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
					     interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and not paused then just send
		 * the put command, no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;

	return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->buf)
			continue;

		ret = vmw_overlay_stop(dev_priv, i, false, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!overlay)
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}

int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!dev_priv->overlay_priv)
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}

int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {

		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}

int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}

int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	/* Overlays need both the video and escape FIFO capabilities. */
	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) ||
	    !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
		DRM_INFO("hardware doesn't support overlays\n");
		return -ENOSYS;
	}

	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}

int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}

@@ -0,0 +1,57 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * This file contains virtual hardware defines for kernel space.
 */

#ifndef _VMWGFX_REG_H_
#define _VMWGFX_REG_H_

#include <linux/types.h>

#define VMWGFX_INDEX_PORT     0x0
#define VMWGFX_VALUE_PORT     0x1
#define VMWGFX_IRQSTATUS_PORT 0x8

struct svga_guest_mem_descriptor {
	__le32 ppn;
	__le32 num_pages;
};

struct svga_fifo_cmd_fence {
	__le32 fence;
};

#define SVGA_SYNC_GENERIC   1
#define SVGA_SYNC_FIFOFULL  2

#include "svga_types.h"

#include "svga3d_reg.h"

#endif

File diff suppressed because it is too large
@@ -0,0 +1,99 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
		if (vmw_fifo_mmap(filp, vma) == 0)
			return 0;
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);
	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}

static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	DRM_INFO("global init.\n");
	return ttm_mem_global_init(ref->object);
}

static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
	struct ttm_global_reference *global_ref;
	int ret;

	global_ref = &dev_priv->mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &vmw_ttm_mem_global_init;
	global_ref->release = &vmw_ttm_mem_global_release;

	ret = ttm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting.\n");
		return ret;
	}

	dev_priv->bo_global_ref.mem_glob =
		dev_priv->mem_global_ref.object;
	global_ref = &dev_priv->bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	ret = ttm_global_item_ref(global_ref);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM buffer objects.\n");
		goto out_no_bo;
	}

	return 0;
out_no_bo:
	ttm_global_item_unref(&dev_priv->mem_global_ref);
	return ret;
}

void vmw_ttm_global_release(struct vmw_private *dev_priv)
{
	ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
	ttm_global_item_unref(&dev_priv->mem_global_ref);
}

@@ -101,6 +101,8 @@ source "drivers/staging/p9auth/Kconfig"
|
source "drivers/staging/line6/Kconfig"

source "drivers/gpu/drm/vmwgfx/Kconfig"

source "drivers/gpu/drm/radeon/Kconfig"

source "drivers/staging/octeon/Kconfig"

@@ -7,4 +7,5 @@ unifdef-y += r128_drm.h
unifdef-y += radeon_drm.h
unifdef-y += sis_drm.h
unifdef-y += savage_drm.h
unifdef-y += vmwgfx_drm.h
unifdef-y += via_drm.h

@@ -0,0 +1,574 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24

#define DRM_VMW_EXT_NAME_LEN 128

#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_FIFO_DEBUG           13
#define DRM_VMW_FENCE_WAIT           14

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_FIFO_OFFSET      3

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	uint64_t value;
	uint32_t param;
	uint32_t pad64;
};
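
As a usage illustration — a minimal user-space sketch, not part of the patch, assuming an already-opened vmwgfx device fd, <string.h>, and libdrm's drmCommandWriteRead() from <xf86drm.h>:

/* Sketch: ask the driver whether 3D is available. */
static int example_has_3d(int fd)
{
	struct drm_vmw_getparam_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.param = DRM_VMW_PARAM_3D;

	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)))
		return 0;	/* Treat failure as "no 3D". */
	return arg.value == 1;
}

libdrm adds DRM_COMMAND_BASE to the index internally, so the raw DRM_VMW_GET_PARAM value is passed directly.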

/*************************************************************************/
/**
 * DRM_VMW_EXTENSION - Query device extensions.
 */

/**
 * struct drm_vmw_extension_rep
 *
 * @exists: The queried extension exists.
 * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
 * @driver_sarea_offset: Offset to any space in the DRI SAREA
 * used by the extension.
 * @major: Major version number of the extension.
 * @minor: Minor version number of the extension.
 * @pl: Patch level version number of the extension.
 *
 * Output argument to the DRM_VMW_EXTENSION Ioctl.
 */

struct drm_vmw_extension_rep {
	int32_t exists;
	uint32_t driver_ioctl_offset;
	uint32_t driver_sarea_offset;
	uint32_t major;
	uint32_t minor;
	uint32_t pl;
	uint32_t pad64;
};

/**
 * union drm_vmw_extension_arg
 *
 * @extension - Ascii name of the extension to be queried. //In
 * @rep - Reply as defined above. //Out
 *
 * Argument to the DRM_VMW_EXTENSION Ioctl.
 */

union drm_vmw_extension_arg {
	char extension[DRM_VMW_EXT_NAME_LEN];
	struct drm_vmw_extension_rep rep;
};

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	int32_t cid;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an uint64_t for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	uint64_t size_addr;
	int32_t shareable;
	uint32_t pad64;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	int32_t sid;
	uint32_t pad64;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	uint32_t width;
	uint32_t height;
	uint32_t depth;
	uint32_t pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
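
To make the layout concrete, here is a hedged sketch of filling in the argument for a single-face surface with three mip levels; the format value is a placeholder (real codes come from the SVGA3D headers), and drmCommandWriteRead() is assumed from libdrm:

/* Sketch: create a 256x256 surface with three mip levels on one face. */
static int example_create_surface(int fd, int32_t *sid_out)
{
	struct drm_vmw_size sizes[3] = {
		{ .width = 256, .height = 256, .depth = 1 },
		{ .width = 128, .height = 128, .depth = 1 },
		{ .width = 64,  .height = 64,  .depth = 1 },
	};
	union drm_vmw_surface_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.format = 0;			/* Hypothetical host format id. */
	arg.req.mip_levels[0] = 3;		/* Faces 1..5 stay 0 (unused). */
	arg.req.size_addr = (uint64_t)(unsigned long)sizes;

	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg));
	if (ret == 0)
		*sid_out = arg.rep.sid;		/* The union now holds the reply. */
	return ret;
}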

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence sequence that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an uint64_t.
 * @command_size: Size in bytes of the command buffer.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * uint64_t.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_execbuf_arg {
	uint64_t commands;
	uint32_t command_size;
	uint32_t pad64;
	uint64_t fence_rep;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @fence_seq: Fence sequence associated with a command submission.
 * @error: This member should be set to -EFAULT by user-space before
 * submission. The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	uint64_t fence_seq;
	int32_t error;
	uint32_t pad64;
};
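
A hedged user-space sketch of the error protocol just described; cmd, cmd_size, and the wait_seq() helper are assumed to exist in the caller, and drmCommandWrite() comes from libdrm:

/* Sketch: submit a command buffer and decide which fence to wait on. */
static int example_execbuf(int fd, void *cmd, uint32_t cmd_size)
{
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_execbuf_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	fence_rep.error = -EFAULT;	/* Pre-set, as the doc comment requires. */

	arg.commands = (uint64_t)(unsigned long)cmd;
	arg.command_size = cmd_size;
	arg.fence_rep = (uint64_t)(unsigned long)&fence_rep;

	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	if (ret)
		return ret;

	if (fence_rep.error == 0)
		wait_seq(fence_rep.fence_seq);	/* Hypothetical helper. */
	/* Nonzero error: the host is already synchronized, nothing to wait on
	 * (for -EFAULT the last fence id in the FIFO register applies). */
	return 0;
}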

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * usable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_alloc_dmabuf_req {
	uint32_t size;
	uint32_t pad64;
};

/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_dmabuf_rep {
	uint64_t map_handle;
	uint32_t handle;
	uint32_t cur_gmr_id;
	uint32_t cur_gmr_offset;
	uint32_t pad64;
};

/**
 * union drm_vmw_alloc_dmabuf_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;
	struct drm_vmw_dmabuf_rep rep;
};
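
A minimal allocation-and-map sketch under the same assumptions as the earlier examples (libdrm, <sys/mman.h>); note that req and rep share storage in the union, so the requested size is saved before the ioctl overwrites it:

/* Sketch: allocate a 64 KiB DMA buffer and map it on the drm device fd. */
static void *example_alloc_dmabuf(int fd, uint32_t *handle_out)
{
	const uint32_t size = 64 * 1024;
	union drm_vmw_alloc_dmabuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.req.size = size;

	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &arg, sizeof(arg)))
		return NULL;

	*handle_out = arg.rep.handle;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)arg.rep.map_handle);
}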

/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 *
 */

/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */

struct drm_vmw_unref_dmabuf_arg {
	uint32_t handle;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
 *
 * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
 */

/**
 * struct drm_vmw_fifo_debug_arg
 *
 * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
 * @debug_buffer_size: Size in bytes of debug buffer //In
 * @used_size: Number of bytes copied to the buffer //Out
 * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
 *
 * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
 */

struct drm_vmw_fifo_debug_arg {
	uint64_t debug_buffer;
	uint32_t debug_buffer_size;
	uint32_t used_size;
	int32_t did_not_fit;
	uint32_t pad64;
};

struct drm_vmw_fence_wait_arg {
	uint64_t sequence;
	uint64_t kernel_cookie;
	int32_t cookie_valid;
	int32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they read back only on each call to this ioctl, and
 * possibly at any point between this call being made and a following call
 * that either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	int32_t x;
	int32_t y;
	uint32_t w;
	uint32_t h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	uint32_t stream_id;
	uint32_t enabled;

	uint32_t flags;
	uint32_t color_key;

	uint32_t handle;
	uint32_t offset;
	int32_t format;
	uint32_t size;
	uint32_t width;
	uint32_t height;
	uint32_t pitch[3];

	uint32_t pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};
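
A hedged sketch of enabling a stream with this argument; stream_id and buf_handle are assumed to come from the claim and alloc ioctls above, and the format code is an assumption (the FOURCC for YV12), not something this header defines:

/* Sketch: enable a stream with a 320x240 YV12 image scaled to 640x480. */
static int example_enable_overlay(int fd, uint32_t stream_id,
				  uint32_t buf_handle)
{
	struct drm_vmw_control_stream_arg s;

	memset(&s, 0, sizeof(s));
	s.stream_id = stream_id;	/* From DRM_VMW_CLAIM_STREAM. */
	s.enabled = 1;
	s.handle = buf_handle;		/* From DRM_VMW_ALLOC_DMABUF. */
	s.format = 0x32315659;		/* FOURCC 'YV12', an assumption. */
	s.width = 320;
	s.height = 240;
	s.size = 320 * 240 * 3 / 2;	/* YV12 is 12 bits per pixel. */
	s.pitch[0] = 320;
	s.pitch[1] = 160;		/* Chroma planes are half width. */
	s.pitch[2] = 160;
	s.src.w = 320;
	s.src.h = 240;
	s.dst.w = 640;
	s.dst.h = 480;

	return drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &s, sizeof(s));
}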

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS  (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	uint32_t flags;
	uint32_t crtc_id;
	int32_t xpos;
	int32_t ypos;
	int32_t xhot;
	int32_t yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	uint32_t stream_id;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

#endif