commit 4be460d96f
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Not too much this time.

  - One nouveau workaround extended to a few more GPUs
  - Some amdgpu big endian fixes, and a regression fixer
  - Some vmwgfx fixes
  - One ttm locking fix
  - One vgaarb fix"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  vgaarb: fix signal handling in vga_get()
  radeon: Fix VCE IB test on Big-Endian systems
  radeon: Fix VCE ring test for Big-Endian systems
  radeon/cik: Fix GFX IB test on Big-Endian
  drm/amdgpu: fix the lost duplicates checking
  drm/nouveau/pmu: remove whitelist for PGOB-exit WAR, enable by default
  drm/vmwgfx: Implement the cursor_set2 callback v2
  drm/vmwgfx: fix a warning message
  drm/ttm: Fixed a read/write lock imbalance
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -477,6 +477,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
+	list_for_each_entry(entry, &duplicates, head) {
+		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
+		/* if anything is swapped out don't swap it in here,
+		   just abort and wait for the next CS */
+		if (domain == AMDGPU_GEM_DOMAIN_CPU)
+			goto error_unreserve;
+	}
+
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
 		goto error_unreserve;
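The hunk above restores the check that was lost for the "duplicates" list: ttm_eu_reserve_buffers() places buffers that share an already-seen reservation object on a separate duplicates list, so a validation loop that walks only the main list misses them. A minimal sketch of the calling pattern, assuming the v4.4-era TTM execbuf-util API (illustrative only, not the full driver code):

	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	/* BOs whose reservation object was already reserved end up on
	 * &duplicates rather than &list... */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		return r;
	/* ...so any "is this BO swapped out?" check has to walk both
	 * lists, as the fix above now does for &duplicates. */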
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -159,7 +159,6 @@ struct nvkm_device_func {
 struct nvkm_device_quirk {
 	u8 tv_pin_mask;
 	u8 tv_gpio;
-	bool War00C800_0;
 };
 
 struct nvkm_device_chip {
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -258,12 +258,6 @@ nvkm_device_pci_10de_0df4[] = {
 	{}
 };
 
-static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_0fcd[] = {
-	{ 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */
-	{}
-};
-
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_0fd2[] = {
 	{ 0x1028, 0x0595, "GeForce GT 640M LE" },
@@ -278,12 +272,6 @@ nvkm_device_pci_10de_0fe3[] = {
 	{}
 };
 
-static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_0fe4[] = {
-	{ 0x144d, 0xc740, NULL, { .War00C800_0 = true } },
-	{}
-};
-
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_104b[] = {
 	{ 0x1043, 0x844c, "GeForce GT 625" },
@@ -690,13 +678,6 @@ nvkm_device_pci_10de_1189[] = {
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_1199[] = {
 	{ 0x1458, 0xd001, "GeForce GTX 760" },
-	{ 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */
 	{}
 };
 
-static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_11e0[] = {
-	{ 0x1558, 0x5106, NULL, { .War00C800_0 = true } },
-	{}
-};
-
@@ -706,14 +687,6 @@ nvkm_device_pci_10de_11e3[] = {
 	{}
 };
 
-static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_11fc[] = {
-	{ 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */
-	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
-	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
-	{}
-};
-
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_1247[] = {
 	{ 0x1043, 0x212a, "GeForce GT 635M" },
@@ -1368,7 +1341,7 @@ nvkm_device_pci_10de[] = {
 	{ 0x0fc6, "GeForce GTX 650" },
 	{ 0x0fc8, "GeForce GT 740" },
 	{ 0x0fc9, "GeForce GT 730" },
-	{ 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd },
+	{ 0x0fcd, "GeForce GT 755M" },
 	{ 0x0fce, "GeForce GT 640M LE" },
 	{ 0x0fd1, "GeForce GT 650M" },
 	{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
@@ -1382,7 +1355,7 @@ nvkm_device_pci_10de[] = {
 	{ 0x0fe1, "GeForce GT 730M" },
 	{ 0x0fe2, "GeForce GT 745M" },
 	{ 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
-	{ 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 },
+	{ 0x0fe4, "GeForce GT 750M" },
 	{ 0x0fe9, "GeForce GT 750M" },
 	{ 0x0fea, "GeForce GT 755M" },
 	{ 0x0fec, "GeForce 710A" },
@@ -1497,12 +1470,12 @@ nvkm_device_pci_10de[] = {
 	{ 0x11c6, "GeForce GTX 650 Ti" },
 	{ 0x11c8, "GeForce GTX 650" },
 	{ 0x11cb, "GeForce GT 740" },
-	{ 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 },
+	{ 0x11e0, "GeForce GTX 770M" },
 	{ 0x11e1, "GeForce GTX 765M" },
 	{ 0x11e2, "GeForce GTX 765M" },
 	{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
 	{ 0x11fa, "Quadro K4000" },
-	{ 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
+	{ 0x11fc, "Quadro K2100M" },
 	{ 0x1200, "GeForce GTX 560 Ti" },
 	{ 0x1201, "GeForce GTX 560" },
 	{ 0x1203, "GeForce GTX 460 SE v2" },
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -81,9 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
 	nvkm_rd32(device, 0x000200);
 
-	if (nvkm_boolopt(device->cfgopt, "War00C800_0",
-	    device->quirk ? device->quirk->War00C800_0 : false)) {
-		nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
+	if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) {
 		switch (device->chipset) {
 		case 0xe4:
 			magic(device, 0x04000000);
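With the whitelist gone, the PGOB-exit workaround now defaults to enabled everywhere instead of only on quirk-listed boards; nvkm_boolopt() still consults the user's config string first, so it remains overridable. A hedged sketch of the lookup semantics (simplified reading, not the in-tree parser; the module-option syntax shown is an assumption):

	/* nvkm_boolopt(cfgopt, key, default): returns the boolean value of
	 * "key" from nouveau's config string if present, else the supplied
	 * default. Before this change an absent key meant "whatever the
	 * per-board quirk table said"; now it simply means enabled.
	 * Presumably booting with nouveau.config=War00C800_0=0 (assumed
	 * syntax) is the way to opt back out. */
	bool enable = nvkm_boolopt(device->cfgopt, "War00C800_0", true);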
drivers/gpu/drm/radeon/cik.c
@@ -4173,11 +4173,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 	control |= ib->length_dw | (vm_id << 24);
 
 	radeon_ring_write(ring, header);
-	radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
-			  (2 << 0) |
-#endif
-			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
 	radeon_ring_write(ring, control);
 }
drivers/gpu/drm/radeon/radeon_vce.c
@@ -361,31 +361,31 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
 
 	/* stitch together an VCE create msg */
 	ib.length_dw = 0;
-	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
-	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
-	ib.ptr[ib.length_dw++] = handle;
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
 
-	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
-	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
-	ib.ptr[ib.length_dw++] = 0x00000000;
-	ib.ptr[ib.length_dw++] = 0x00000042;
-	ib.ptr[ib.length_dw++] = 0x0000000a;
-	ib.ptr[ib.length_dw++] = 0x00000001;
-	ib.ptr[ib.length_dw++] = 0x00000080;
-	ib.ptr[ib.length_dw++] = 0x00000060;
-	ib.ptr[ib.length_dw++] = 0x00000100;
-	ib.ptr[ib.length_dw++] = 0x00000100;
-	ib.ptr[ib.length_dw++] = 0x0000000c;
-	ib.ptr[ib.length_dw++] = 0x00000000;
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
 
-	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
-	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
-	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
-	ib.ptr[ib.length_dw++] = dummy;
-	ib.ptr[ib.length_dw++] = 0x00000001;
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
+	ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
 
 	for (i = ib.length_dw; i < ib_size_dw; ++i)
-		ib.ptr[i] = 0x0;
+		ib.ptr[i] = cpu_to_le32(0x0);
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
@@ -428,21 +428,21 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
 
 	/* stitch together an VCE destroy msg */
 	ib.length_dw = 0;
-	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
-	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
-	ib.ptr[ib.length_dw++] = handle;
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
 
-	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
-	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
-	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
-	ib.ptr[ib.length_dw++] = dummy;
-	ib.ptr[ib.length_dw++] = 0x00000001;
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
+	ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
 
-	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
-	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
+	ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */
 
 	for (i = ib.length_dw; i < ib_size_dw; ++i)
-		ib.ptr[i] = 0x0;
+		ib.ptr[i] = cpu_to_le32(0x0);
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
@@ -699,12 +699,12 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
 {
 	uint64_t addr = semaphore->gpu_addr;
 
-	radeon_ring_write(ring, VCE_CMD_SEMAPHORE);
-	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-	radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
+	radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
+	radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
+	radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
 	if (!emit_wait)
-		radeon_ring_write(ring, VCE_CMD_END);
+		radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
 
 	return true;
 }
@@ -719,10 +719,10 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
 void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
-	radeon_ring_write(ring, VCE_CMD_IB);
-	radeon_ring_write(ring, ib->gpu_addr);
-	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
-	radeon_ring_write(ring, ib->length_dw);
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
+	radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
+	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
+	radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
 }
 
 /**
@@ -738,12 +738,12 @@ void radeon_vce_fence_emit(struct radeon_device *rdev,
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
 	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
 
-	radeon_ring_write(ring, VCE_CMD_FENCE);
-	radeon_ring_write(ring, addr);
-	radeon_ring_write(ring, upper_32_bits(addr));
-	radeon_ring_write(ring, fence->seq);
-	radeon_ring_write(ring, VCE_CMD_TRAP);
-	radeon_ring_write(ring, VCE_CMD_END);
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
+	radeon_ring_write(ring, cpu_to_le32(addr));
+	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
+	radeon_ring_write(ring, cpu_to_le32(fence->seq));
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
 }
 
 /**
@@ -765,7 +765,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 			  ring->idx, r);
 		return r;
 	}
-	radeon_ring_write(ring, VCE_CMD_END);
+	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
 	radeon_ring_unlock_commit(rdev, ring, false);
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
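All of the VCE changes follow one pattern: the VCE block consumes its command stream as little-endian 32-bit words, so every word the CPU writes must pass through cpu_to_le32(), which is a no-op on little-endian hosts and a byte swap on big-endian ones. A minimal standalone illustration of why the raw stores break (illustration only, not driver code):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void fill_words(u32 *buf)
	{
		/* Broken on big-endian: the CPU stores the bytes as
		 * 00 00 00 0c, but the engine reads the word as if the
		 * bytes were 0c 00 00 00. */
		buf[0] = 0x0000000c;

		/* Correct on any host: the in-memory bytes are
		 * 0c 00 00 00 either way; cpu_to_le32() compiles away
		 * on LE and becomes a swab32() on BE. */
		buf[1] = cpu_to_le32(0x0000000c);
	}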
drivers/gpu/drm/ttm/ttm_lock.c
@@ -180,7 +180,7 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
 			spin_unlock(&lock->lock);
 		}
 	} else
-		wait_event(lock->queue, __ttm_read_lock(lock));
+		wait_event(lock->queue, __ttm_write_lock(lock));
 
 	return ret;
 }
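The one-word fix matters because wait_event()'s condition is also the acquisition attempt: __ttm_write_lock() tries to take the lock and returns true on success, and wait_event() re-evaluates the expression after every wakeup. Polling __ttm_read_lock() from the write path could take a read lock that the write-lock caller would never release, hence the imbalance. The general shape of the pattern, as a hedged sketch:

	/* wait_event(queue, cond): sleeps until cond is true; cond is
	 * evaluated once up front and again after each wake-up on queue.
	 * When cond has side effects (here: actually acquiring the lock),
	 * the predicate must match the lock mode being requested. */
	wait_event(lock->queue, __ttm_write_lock(lock));  /* write path */
	wait_event(lock->queue, __ttm_read_lock(lock));   /* read path  */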
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1233,6 +1233,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+	vmw_kms_legacy_hotspot_clear(dev_priv);
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
 		drm_master_put(&vmw_fp->locked_master);
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -925,6 +925,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 			   uint32_t num_clips);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
 
 int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -390,7 +390,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
 	else if (ctx_id == SVGA3D_INVALID_ID)
 		ret = vmw_local_fifo_reserve(dev_priv, bytes);
 	else {
-		WARN_ON("Command buffer has not been allocated.\n");
+		WARN(1, "Command buffer has not been allocated.\n");
 		ret = NULL;
 	}
 	if (IS_ERR_OR_NULL(ret)) {
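The one-liner fixes a misuse of the macro: WARN_ON(cond) treats its argument as the condition, so WARN_ON("string") always triggers (a non-NULL string pointer is true) yet never prints the text, whereas WARN(cond, fmt, ...) takes an explicit condition plus a printk-style message. For comparison (the first condition is a hypothetical example):

	WARN_ON(bytes > PAGE_SIZE);         /* condition only, no message */
	WARN(1, "Command buffer has not been allocated.\n");
					    /* always fires, with message */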
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -133,13 +133,19 @@ void vmw_cursor_update_position(struct vmw_private *dev_priv,
 	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 }
 
-int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
-			   uint32_t handle, uint32_t width, uint32_t height)
+
+/*
+ * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
+ */
+int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+			    uint32_t handle, uint32_t width, uint32_t height,
+			    int32_t hot_x, int32_t hot_y)
 {
 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 	struct vmw_surface *surface = NULL;
 	struct vmw_dma_buffer *dmabuf = NULL;
+	s32 hotspot_x, hotspot_y;
 	int ret;
 
 	/*
@@ -151,6 +157,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	 */
 	drm_modeset_unlock_crtc(crtc);
 	drm_modeset_lock_all(dev_priv->dev);
+	hotspot_x = hot_x + du->hotspot_x;
+	hotspot_y = hot_y + du->hotspot_y;
 
 	/* A lot of the code assumes this */
 	if (handle && (width != 64 || height != 64)) {
@@ -187,31 +195,34 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 		vmw_dmabuf_unreference(&du->cursor_dmabuf);
 
 	/* setup new image */
+	ret = 0;
 	if (surface) {
 		/* vmw_user_surface_lookup takes one reference */
 		du->cursor_surface = surface;
 
 		du->cursor_surface->snooper.crtc = crtc;
 		du->cursor_age = du->cursor_surface->snooper.age;
-		vmw_cursor_update_image(dev_priv, surface->snooper.image,
-					64, 64, du->hotspot_x, du->hotspot_y);
+		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
+					      64, 64, hotspot_x, hotspot_y);
 	} else if (dmabuf) {
 		/* vmw_user_surface_lookup takes one reference */
 		du->cursor_dmabuf = dmabuf;
 
 		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
-					       du->hotspot_x, du->hotspot_y);
+					       hotspot_x, hotspot_y);
 	} else {
 		vmw_cursor_update_position(dev_priv, false, 0, 0);
-		ret = 0;
 		goto out;
 	}
 
-	vmw_cursor_update_position(dev_priv, true,
-				   du->cursor_x + du->hotspot_x,
-				   du->cursor_y + du->hotspot_y);
+	if (!ret) {
+		vmw_cursor_update_position(dev_priv, true,
+					   du->cursor_x + hotspot_x,
+					   du->cursor_y + hotspot_y);
+		du->core_hotspot_x = hot_x;
+		du->core_hotspot_y = hot_y;
+	}
 
-	ret = 0;
 out:
 	drm_modeset_unlock_all(dev_priv->dev);
 	drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -239,8 +250,10 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	drm_modeset_lock_all(dev_priv->dev);
 
 	vmw_cursor_update_position(dev_priv, shown,
-				   du->cursor_x + du->hotspot_x,
-				   du->cursor_y + du->hotspot_y);
+				   du->cursor_x + du->hotspot_x +
+				   du->core_hotspot_x,
+				   du->cursor_y + du->hotspot_y +
+				   du->core_hotspot_y);
 
 	drm_modeset_unlock_all(dev_priv->dev);
 	drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -334,6 +347,29 @@ err_unreserve:
 	ttm_bo_unreserve(bo);
 }
 
+/**
+ * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
+ *
+ * @dev_priv: Pointer to the device private struct.
+ *
+ * Clears all legacy hotspots.
+ */
+void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_display_unit *du;
+	struct drm_crtc *crtc;
+
+	drm_modeset_lock_all(dev);
+	drm_for_each_crtc(crtc, dev) {
+		du = vmw_crtc_to_du(crtc);
+
+		du->hotspot_x = 0;
+		du->hotspot_y = 0;
+	}
+	drm_modeset_unlock_all(dev);
+}
+
 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
@@ -351,7 +387,9 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 		du->cursor_age = du->cursor_surface->snooper.age;
 		vmw_cursor_update_image(dev_priv,
 					du->cursor_surface->snooper.image,
-					64, 64, du->hotspot_x, du->hotspot_y);
+					64, 64,
+					du->hotspot_x + du->core_hotspot_x,
+					du->hotspot_y + du->core_hotspot_y);
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -159,6 +159,8 @@ struct vmw_display_unit {
 
 	int hotspot_x;
 	int hotspot_y;
+	s32 core_hotspot_x;
+	s32 core_hotspot_y;
 
 	unsigned unit;
@@ -193,8 +195,9 @@ void vmw_du_crtc_restore(struct drm_crtc *crtc);
 void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
 			   u16 *r, u16 *g, u16 *b,
 			   uint32_t start, uint32_t size);
-int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
-			   uint32_t handle, uint32_t width, uint32_t height);
+int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+			    uint32_t handle, uint32_t width, uint32_t height,
+			    int32_t hot_x, int32_t hot_y);
 int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
 void vmw_du_connector_save(struct drm_connector *connector);
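For context, .cursor_set2 is the drm_crtc_funcs hook that extends .cursor_set with the cursor hotspot, which a hosted driver like vmwgfx needs so the guest cursor lines up with the host's pointer. Its shape in this kernel generation is roughly the following (quoted from memory, so treat as approximate):

	/* From struct drm_crtc_funcs in include/drm/drm_crtc.h (approximate): */
	int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height,
			   int32_t hot_x, int32_t hot_y);

The three drm_crtc_funcs tables below simply swap the old callback for the new one.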
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -297,7 +297,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
 	.save = vmw_du_crtc_save,
 	.restore = vmw_du_crtc_restore,
-	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_set2 = vmw_du_crtc_cursor_set2,
 	.cursor_move = vmw_du_crtc_cursor_move,
 	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_ldu_crtc_destroy,
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -533,7 +533,7 @@ out_no_fence:
 static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
 	.save = vmw_du_crtc_save,
 	.restore = vmw_du_crtc_restore,
-	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_set2 = vmw_du_crtc_cursor_set2,
 	.cursor_move = vmw_du_crtc_cursor_move,
 	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_sou_crtc_destroy,
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1043,7 +1043,7 @@ out_finish:
 static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
 	.save = vmw_du_crtc_save,
 	.restore = vmw_du_crtc_restore,
-	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_set2 = vmw_du_crtc_cursor_set2,
 	.cursor_move = vmw_du_crtc_cursor_move,
 	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_stdu_crtc_destroy,
drivers/gpu/vga/vgaarb.c
@@ -395,8 +395,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 		set_current_state(interruptible ?
 				  TASK_INTERRUPTIBLE :
 				  TASK_UNINTERRUPTIBLE);
-		if (signal_pending(current)) {
-			rc = -EINTR;
+		if (interruptible && signal_pending(current)) {
+			__set_current_state(TASK_RUNNING);
+			remove_wait_queue(&vga_wait_queue, &wait);
+			rc = -ERESTARTSYS;
 			break;
 		}
 		schedule();
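This brings vga_get()'s open-coded wait loop in line with the canonical pattern: only an interruptible sleeper may abort on a signal, the task state and wait-queue entry must be unwound before bailing out, and -ERESTARTSYS (rather than -EINTR) lets the core transparently restart the syscall when no handler intervenes. The standard skeleton, as a sketch with placeholder names (queue, wait, done, rc):

	add_wait_queue(&queue, &wait);
	for (;;) {
		set_current_state(interruptible ? TASK_INTERRUPTIBLE
						: TASK_UNINTERRUPTIBLE);
		if (done)                       /* condition satisfied */
			break;
		if (interruptible && signal_pending(current)) {
			rc = -ERESTARTSYS;      /* restartable, not -EINTR */
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);      /* undo the sleep state */
	remove_wait_queue(&queue, &wait);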