drm: move dev data clearing from drm_setup to lastclose
We kzalloc this structure, and for real KMS devices we should never lose track of things. But UMS/legacy drivers rely on the drm core to clean up a bit of cruft between lastclose and firstopen (i.e. when X is being restarted), so keep this around. But give it a clear drm_legacy_ prefix and conditionalize the code on !DRIVER_MODESET.

Cc: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
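For context, the lastclose/firstopen ordering the message relies on looks roughly like the sketch below. This is illustrative only: example_restart_cycle() is a made-up wrapper, not kernel code, and drm_setup() is actually static to drm_fops.c; only drm_lastclose(), drm_setup() and drm_legacy_dev_reinit() are names from the patch itself.

/*
 * Illustrative sketch only: example_restart_cycle() is hypothetical.
 * It shows the ordering during an X server restart that the commit
 * message refers to.
 */
static void example_restart_cycle(struct drm_device *dev)
{
	/*
	 * Last userspace client (e.g. the X server) closes the node:
	 * drm_lastclose() runs and, for !DRIVER_MODESET devices, now
	 * calls drm_legacy_dev_reinit() to clear the per-device
	 * bookkeeping (ioctl/vma counters, sigdata.lock, context state).
	 */
	drm_lastclose(dev);

	/*
	 * A new client opens the node again: drm_setup() runs, which
	 * after this patch only calls the driver's firstopen hook and
	 * drm_legacy_dma_setup(), without clearing dev state itself.
	 */
	drm_setup(dev);
}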
parent cb6458f97b
commit f336ab7600
drivers/gpu/drm/drm_drv.c

@@ -170,6 +170,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
+/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in it's lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+	int i;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+
+	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+		atomic_set(&dev->counts[i], 0);
+
+	dev->sigdata.lock = NULL;
+
+	dev->context_flag = 0;
+	dev->last_context = 0;
+	dev->if_version = 0;
+}
+
 /**
  * Take down the DRM device.
  *
@@ -209,6 +234,8 @@ int drm_lastclose(struct drm_device * dev)
 	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
+	drm_legacy_dev_reinit(dev);
+
 	DRM_DEBUG("lastclose completed\n");
 	return 0;
 }
drivers/gpu/drm/drm_fops.c

@@ -48,7 +48,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 static int drm_setup(struct drm_device * dev)
 {
-	int i;
 	int ret;
 
 	if (dev->driver->firstopen &&
@@ -58,32 +57,12 @@ static int drm_setup(struct drm_device * dev)
 			return ret;
 	}
 
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
+	ret = drm_legacy_dma_setup(dev);
+	if (ret < 0)
+		return ret;
 
-	i = drm_legacy_dma_setup(dev);
-	if (i < 0)
-		return i;
-
-	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
 
 	DRM_DEBUG("\n");
 
-	/*
-	 * The kernel's context could be created here, but is now created
-	 * in drm_dma_enqueue.	This is more resource-efficient for
-	 * hardware that does not do DMA, but may mean that
-	 * drm_select_queue fails between the time the interrupt is
-	 * initialized and the time the queues are initialized.
-	 */
-
 	return 0;
 }