drm/vmwgfx: Fix dma buffer memory size accounting
Also request kernel ttm_buffer objects for buffer objects that obviously
aren't visible to user-space, and save some device address space.

The accounting was broken in a couple of ways:
1) We did not differentiate between user dma buffers and kernel dma buffers.
2) The ttm_bo_acc_size function is broken in that it
   a) Doesn't take into account the size of the optional dma address array,
   b) Doesn't take into account the fact that drivers typically embed the
      ttm_tt structure.
This needs to be fixed in ttm, but meanwhile provide a vmwgfx-specific
function to do the job.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
commit 308d17ef95
parent d69d51d73f
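As a rough, out-of-tree illustration of the arithmetic the new vmw_dmabuf_acc_size() performs (see the third hunk below), the standalone sketch underneath assumes 4 KiB pages, models dma_addr_t as 64 bits, approximates ttm_round_pot() with its usual round-up-to-power-of-two / page-align behaviour, and substitutes made-up placeholder sizes for vmw_tt_size, struct vmw_dma_buffer and struct vmw_user_dma_buffer; it is not driver code.

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_PAGE_ALIGN(x) \
	(((x) + SKETCH_PAGE_SIZE - 1) & ~(size_t)(SKETCH_PAGE_SIZE - 1))

/* Approximation of ttm_round_pot(): power-of-two sizes are kept as-is,
 * sizes above a page are page-aligned, small sizes round up to the next
 * power of two. */
static size_t round_pot(size_t size)
{
	size_t tmp = 4;

	if ((size & (size - 1)) == 0)
		return size;
	if (size > SKETCH_PAGE_SIZE)
		return SKETCH_PAGE_ALIGN(size);
	while (tmp < size)
		tmp <<= 1;
	return tmp;
}

/* Per-buffer accounting as the patch computes it: rounded size of the
 * embedded TTM backend plus the (user or kernel) dma-buffer struct, the
 * page-pointer array, and - only for coherent mappings - the dma_addr_t
 * array that ttm_bo_acc_size() does not count. The three struct sizes
 * below are placeholders, not the real vmwgfx structs. */
static size_t acc_size_sketch(size_t size, bool user, bool coherent)
{
	const size_t tt_size = 192;           /* stand-in for vmw_tt_size */
	const size_t dma_buf_size = 256;      /* stand-in for struct vmw_dma_buffer */
	const size_t user_dma_buf_size = 320; /* stand-in for struct vmw_user_dma_buffer */

	size_t num_pages = SKETCH_PAGE_ALIGN(size) / SKETCH_PAGE_SIZE;
	size_t struct_size = round_pot(tt_size) +
		round_pot(user ? user_dma_buf_size : dma_buf_size);
	size_t page_array_size = round_pot(num_pages * sizeof(void *));

	if (coherent)	/* dma_addr_t modelled as 64 bits here */
		page_array_size += round_pot(num_pages * sizeof(uint64_t));

	return struct_size + page_array_size;
}

int main(void)
{
	/* 1 MiB coherent user buffer: 256 pages, so the dma_addr_t array
	 * alone adds 2 KiB that the old accounting missed. */
	printf("%zu bytes accounted\n", acc_size_sketch(1u << 20, true, true));
	return 0;
}

For a 1 MiB coherent user buffer (256 pages), the dma_addr_t array accounts for an extra 2 KiB on top of the page-pointer array, which is exactly the part ttm_bo_acc_size() left out.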
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
 	bool mapped;
 };
 
+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
 /**
  * Helper functions to advance a struct vmw_piter iterator.
  *
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
  * TTM buffer object driver - vmwgfx_buffer.c
  */
 
+extern const size_t vmw_tt_size;
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 /**
  * Buffer management.
  */
+
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+				  bool user)
+{
+	static size_t struct_size, user_struct_size;
+	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+	if (unlikely(struct_size == 0)) {
+		size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+		struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_dma_buffer));
+		user_struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+	}
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		page_array_size +=
+			ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+	return ((user) ? user_struct_size : struct_size) +
+		page_array_size;
+}
+
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 	kfree(vmw_bo);
 }
 
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+	ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
 int vmw_dmabuf_init(struct vmw_private *dev_priv,
 		    struct vmw_dma_buffer *vmw_bo,
 		    size_t size, struct ttm_placement *placement,
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	struct ttm_bo_device *bdev = &dev_priv->bdev;
 	size_t acc_size;
 	int ret;
+	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 
-	BUG_ON(!bo_free);
+	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 
-	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
 	INIT_LIST_HEAD(&vmw_bo->res_list);
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-			  ttm_bo_type_device, placement,
+			  (user) ? ttm_bo_type_device :
+			  ttm_bo_type_kernel, placement,
 			  0, interruptible,
 			  NULL, acc_size, NULL, bo_free);
 	return ret;
 }
 
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
-	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
-	ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;