Merge tag 'vmwgfx-next-2014-07-04' of git://people.freedesktop.org/~thomash/linux into drm-next
Pull request of 2014-07-04

* tag 'vmwgfx-next-2014-07-04' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Fix compat shader namespace
commit 4b7ba8697b
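The single patch pulled in here replaces the per-file compat shader manager with a generic, per-context "command buffer managed resource" manager (new file vmwgfx_cmdbuf_res.c below). The following sketch is illustrative only and not part of the patch; the example_* names are made up. It shows the staging lifecycle the new API is built around: stage actions on a local list while the command stream is parsed, then commit them once the FIFO contents have been submitted, or revert them if submission fails.

/*
 * Illustrative sketch, not part of the patch.  Error handling is
 * abbreviated and the cmdbuf mutex is assumed to be held, as in the
 * execbuf path.
 */
static int example_stage_and_commit(struct vmw_cmdbuf_res_manager *man,
                                    struct vmw_resource *res,
                                    u32 user_key)
{
        struct list_head staged;        /* per-submission staging list */
        int ret;

        INIT_LIST_HEAD(&staged);

        /*
         * Stage an addition: the entry goes into the manager's hash table
         * and onto the private staging list.
         */
        ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
                                 user_key, res, &staged);
        if (ret != 0)
                return ret;

        ret = example_submit_commands();        /* made-up submission step */
        if (ret != 0) {
                vmw_cmdbuf_res_revert(&staged); /* undo all staged actions */
                return ret;
        }

        vmw_cmdbuf_res_commit(&staged);         /* publish staged actions */
        return 0;
}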
drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
+	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
+	    vmwgfx_cmdbuf_res.o \
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c (new file)
@@ -0,0 +1,341 @@
+/**************************************************************************
+ *
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
+
+enum vmw_cmdbuf_res_state {
+        VMW_CMDBUF_RES_COMMITED,
+        VMW_CMDBUF_RES_ADD,
+        VMW_CMDBUF_RES_DEL
+};
+
+/**
+ * struct vmw_cmdbuf_res - Command buffer managed resource entry.
+ *
+ * @res: Refcounted pointer to a struct vmw_resource.
+ * @hash: Hash entry for the manager hash table.
+ * @head: List head used either by the staging list or the manager list
+ * of commited resources.
+ * @state: Staging state of this resource entry.
+ * @man: Pointer to a resource manager for this entry.
+ */
+struct vmw_cmdbuf_res {
+        struct vmw_resource *res;
+        struct drm_hash_item hash;
+        struct list_head head;
+        enum vmw_cmdbuf_res_state state;
+        struct vmw_cmdbuf_res_manager *man;
+};
+
+/**
+ * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
+ *
+ * @resources: Hash table containing staged and commited command buffer
+ * resources
+ * @list: List of commited command buffer resources.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @resources and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_cmdbuf_res_manager {
+        struct drm_open_hash resources;
+        struct list_head list;
+        struct vmw_private *dev_priv;
+};
+
+
+/**
+ * vmw_cmdbuf_res_lookup - Look up a command buffer resource
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @resource_type: The resource type, that combined with the user key
+ * identifies the resource.
+ * @user_key: The user key.
+ *
+ * Returns a valid refcounted struct vmw_resource pointer on success,
+ * an error pointer on failure.
+ */
+struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+                      enum vmw_cmdbuf_res_type res_type,
+                      u32 user_key)
+{
+        struct drm_hash_item *hash;
+        int ret;
+        unsigned long key = user_key | (res_type << 24);
+
+        ret = drm_ht_find_item(&man->resources, key, &hash);
+        if (unlikely(ret != 0))
+                return ERR_PTR(ret);
+
+        return vmw_resource_reference
+                (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+}
+
+/**
+ * vmw_cmdbuf_res_free - Free a command buffer resource.
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @entry: Pointer to a struct vmw_cmdbuf_res.
+ *
+ * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
+ * struct vmw_resource.
+ */
+static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
+                                struct vmw_cmdbuf_res *entry)
+{
+        list_del(&entry->head);
+        WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+        vmw_resource_unreference(&entry->res);
+        kfree(entry);
+}
+
+/**
+ * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function commits a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has commited the fifo contents to the device.
+ */
+void vmw_cmdbuf_res_commit(struct list_head *list)
+{
+        struct vmw_cmdbuf_res *entry, *next;
+
+        list_for_each_entry_safe(entry, next, list, head) {
+                list_del(&entry->head);
+                switch (entry->state) {
+                case VMW_CMDBUF_RES_ADD:
+                        entry->state = VMW_CMDBUF_RES_COMMITED;
+                        list_add_tail(&entry->head, &entry->man->list);
+                        break;
+                case VMW_CMDBUF_RES_DEL:
+                        vmw_resource_unreference(&entry->res);
+                        kfree(entry);
+                        break;
+                default:
+                        BUG();
+                        break;
+                }
+        }
+}
+
+/**
+ * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @list: Caller's list of command buffer resource action
+ *
+ * This function reverts a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_cmdbuf_res_revert(struct list_head *list)
+{
+        struct vmw_cmdbuf_res *entry, *next;
+        int ret;
+
+        list_for_each_entry_safe(entry, next, list, head) {
+                switch (entry->state) {
+                case VMW_CMDBUF_RES_ADD:
+                        vmw_cmdbuf_res_free(entry->man, entry);
+                        break;
+                case VMW_CMDBUF_RES_DEL:
+                        ret = drm_ht_insert_item(&entry->man->resources,
+                                                 &entry->hash);
+                        list_del(&entry->head);
+                        list_add_tail(&entry->head, &entry->man->list);
+                        entry->state = VMW_CMDBUF_RES_COMMITED;
+                        break;
+                default:
+                        BUG();
+                        break;
+                }
+        }
+}
+
+/**
+ * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @res: Valid (refcount != 0) pointer to a struct vmw_resource.
+ * @list: The staging list.
+ *
+ * This function allocates a struct vmw_cmdbuf_res entry and adds the
+ * resource to the hash table of the manager identified by @man. The
+ * entry is then put on the staging list identified by @list.
+ */
+int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+                       enum vmw_cmdbuf_res_type res_type,
+                       u32 user_key,
+                       struct vmw_resource *res,
+                       struct list_head *list)
+{
+        struct vmw_cmdbuf_res *cres;
+        int ret;
+
+        cres = kzalloc(sizeof(*cres), GFP_KERNEL);
+        if (unlikely(cres == NULL))
+                return -ENOMEM;
+
+        cres->hash.key = user_key | (res_type << 24);
+        ret = drm_ht_insert_item(&man->resources, &cres->hash);
+        if (unlikely(ret != 0))
+                goto out_invalid_key;
+
+        cres->state = VMW_CMDBUF_RES_ADD;
+        cres->res = vmw_resource_reference(res);
+        cres->man = man;
+        list_add_tail(&cres->head, list);
+
+out_invalid_key:
+        return ret;
+}
+
+/**
+ * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @list: The staging list.
+ *
+ * This function looks up the struct vmw_cmdbuf_res entry from the manager
+ * hash table and, if it exists, removes it. Depending on its current staging
+ * state it then either removes the entry from the staging list or adds it
+ * to it with a staging state of removal.
+ */
+int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+                          enum vmw_cmdbuf_res_type res_type,
+                          u32 user_key,
+                          struct list_head *list)
+{
+        struct vmw_cmdbuf_res *entry;
+        struct drm_hash_item *hash;
+        int ret;
+
+        ret = drm_ht_find_item(&man->resources, user_key, &hash);
+        if (likely(ret != 0))
+                return -EINVAL;
+
+        entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
+
+        switch (entry->state) {
+        case VMW_CMDBUF_RES_ADD:
+                vmw_cmdbuf_res_free(man, entry);
+                break;
+        case VMW_CMDBUF_RES_COMMITED:
+                (void) drm_ht_remove_item(&man->resources, &entry->hash);
+                list_del(&entry->head);
+                entry->state = VMW_CMDBUF_RES_DEL;
+                list_add_tail(&entry->head, list);
+                break;
+        default:
+                BUG();
+                break;
+        }
+
+        return 0;
+}
+
+/**
+ * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
+ * manager.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * Allocates and initializes a command buffer managed resource manager. Returns
+ * an error pointer on failure.
+ */
+struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
+{
+        struct vmw_cmdbuf_res_manager *man;
+        int ret;
+
+        man = kzalloc(sizeof(*man), GFP_KERNEL);
+        if (man == NULL)
+                return ERR_PTR(-ENOMEM);
+
+        man->dev_priv = dev_priv;
+        INIT_LIST_HEAD(&man->list);
+        ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+        if (ret == 0)
+                return man;
+
+        kfree(man);
+        return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
+ * manager.
+ *
+ * @man: Pointer to the manager to destroy.
+ *
+ * This function destroys a command buffer managed resource manager and
+ * unreferences / frees all command buffer managed resources and -entries
+ * associated with it.
+ */
+void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
+{
+        struct vmw_cmdbuf_res *entry, *next;
+
+        list_for_each_entry_safe(entry, next, &man->list, head)
+                vmw_cmdbuf_res_free(man, entry);
+
+        kfree(man);
+}
+
+/**
+ *
+ * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
+ * resource manager
+ *
+ * Returns the approximate allocation size of a command buffer managed
+ * resource manager.
+ */
+size_t vmw_cmdbuf_res_man_size(void)
+{
+        static size_t res_man_size;
+
+        if (unlikely(res_man_size == 0))
+                res_man_size =
+                        ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
+                        ttm_round_pot(sizeof(struct hlist_head) <<
+                                      VMW_CMDBUF_RES_MAN_HT_ORDER);
+
+        return res_man_size;
+}
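A note on the hash keys used above (illustrative only, not part of the patch): vmw_cmdbuf_res_add() tags the caller's 32-bit user key with the resource type in bits 24 and up, and the compat shader code later in this series first packs a 20-bit user key and a 4-bit shader type into that user key. The made-up helper below spells out the combined packing.

/* Illustrative only; mirrors vmw_compat_shader_key() and vmw_cmdbuf_res_add(). */
static unsigned long example_compat_shader_hash_key(u32 user_key,
                                                    SVGA3dShaderType shader_type)
{
        u32 packed = user_key | (shader_type << 20);    /* 20-bit key + 4-bit type */

        return packed | (vmw_cmdbuf_res_compat_shader << 24);
}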
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -33,6 +33,7 @@ struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
 	struct vmw_ctx_binding_state cbs;
+	struct vmw_cmdbuf_res_manager *man;
 };
 
 
@@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
 
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
-
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct {
 		SVGA3dCmdHeader header;
@@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
 	if (res->func->destroy == vmw_gb_context_destroy) {
+		mutex_lock(&dev_priv->cmdbuf_mutex);
+		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill
-			(&container_of(res, struct vmw_user_context, res)->cbs);
+		(void) vmw_context_binding_state_kill(&uctx->cbs);
 		(void) vmw_gb_context_destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
@@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 	ret = vmw_resource_init(dev_priv, res, true,
 				res_free, &vmw_gb_context_func);
 	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
-	if (unlikely(ret != 0)) {
-		if (res_free)
-			res_free(res);
-		else
-			kfree(res);
-		return ret;
-	}
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (dev_priv->has_mob) {
+		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
+		if (unlikely(IS_ERR(uctx->man))) {
+			ret = PTR_ERR(uctx->man);
+			uctx->man = NULL;
+			goto out_err;
+		}
+	}
 
 	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
@@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
+
+out_err:
+	if (res_free)
+		res_free(res);
+	else
+		kfree(res);
+	return ret;
 }
 
 static int vmw_context_init(struct vmw_private *dev_priv,
@@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	 */
 
 	if (unlikely(vmw_user_context_size == 0))
-		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
+		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
@@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
 {
 	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
 }
+
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->man;
+}
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -946,7 +946,6 @@ static void vmw_postclose(struct drm_device *dev,
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
-	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -966,16 +965,10 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
-	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
-	if (IS_ERR(vmw_fp->shman))
-		goto out_no_shman;
-
 	file_priv->driver_priv = vmw_fp;
 
 	return 0;
 
-out_no_shman:
-	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,10 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20140325"
+#define VMWGFX_DRIVER_DATE "20140704"
 #define VMWGFX_DRIVER_MAJOR 2
 #define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_DRIVER_PATCHLEVEL 1
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
@@ -75,14 +75,11 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
-struct vmw_compat_shader_manager;
-
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
 	bool gb_aware;
-	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -124,6 +121,10 @@ struct vmw_resource {
 	void (*hw_destroy) (struct vmw_resource *res);
 };
 
+
+/*
+ * Resources that are managed using ioctls.
+ */
 enum vmw_res_type {
 	vmw_res_context,
 	vmw_res_surface,
@@ -132,6 +133,15 @@ enum vmw_res_type {
 	vmw_res_max
 };
 
+/*
+ * Resources that are managed using command streams.
+ */
+enum vmw_cmdbuf_res_type {
+	vmw_cmdbuf_res_compat_shader
+};
+
+struct vmw_cmdbuf_res_manager;
+
 struct vmw_cursor_snooper {
 	struct drm_crtc *crtc;
 	size_t age;
@@ -341,7 +351,7 @@ struct vmw_sw_context{
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
-	struct list_head staged_shaders;
+	struct list_head staged_cmd_res;
 };
 
 struct vmw_legacy_display;
@@ -974,7 +984,8 @@ extern void vmw_context_binding_res_list_kill(struct list_head *head);
 extern void vmw_context_binding_res_list_scrub(struct list_head *head);
 extern int vmw_context_rebind_all(struct vmw_resource *ctx);
 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
-
+extern struct vmw_cmdbuf_res_manager *
+vmw_context_res_man(struct vmw_resource *ctx);
 /*
  * Surface management - vmwgfx_surface.c
  */
@@ -1008,27 +1019,42 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
-extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
-				    SVGA3dShaderType shader_type,
-				    u32 *user_key);
-extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
-				      struct list_head *list);
-extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
-				      struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
-				    u32 user_key,
-				    SVGA3dShaderType shader_type,
-				    struct list_head *list);
-extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
+				 struct vmw_cmdbuf_res_manager *man,
 				 u32 user_key, const void *bytecode,
 				 SVGA3dShaderType shader_type,
 				 size_t size,
 				 struct ttm_object_file *tfile,
 				 struct list_head *list);
-extern struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv);
-extern void
-vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
+				    u32 user_key, SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+			 u32 user_key, SVGA3dShaderType shader_type);
+
+/*
+ * Command buffer managed resources - vmwgfx_cmdbuf_res.c
+ */
+
+extern struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
+extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
+extern size_t vmw_cmdbuf_res_man_size(void);
+extern struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+		      enum vmw_cmdbuf_res_type res_type,
+		      u32 user_key);
+extern void vmw_cmdbuf_res_revert(struct list_head *list);
+extern void vmw_cmdbuf_res_commit(struct list_head *list);
+extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+			      enum vmw_cmdbuf_res_type res_type,
+			      u32 user_key,
+			      struct vmw_resource *res,
+			      struct list_head *list);
+extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+				 enum vmw_cmdbuf_res_type res_type,
+				 u32 user_key,
+				 struct list_head *list);
 
 
 /**
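Illustrative sketch, not part of the patch: with the declarations above, a compat shader is resolved in the namespace of the context the command stream operates on, instead of in a per-file manager. example_find_compat_shader() is a made-up wrapper showing the intended call sequence.

static struct vmw_resource *
example_find_compat_shader(struct vmw_resource *ctx_res, u32 shid,
                           SVGA3dShaderType shader_type)
{
        /* Per-context namespace introduced by this patch. */
        struct vmw_cmdbuf_res_manager *man = vmw_context_res_man(ctx_res);

        /* Returns a refcounted resource pointer or an ERR_PTR() value. */
        return vmw_compat_shader_lookup(man, shid, shader_type);
}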
@ -422,117 +422,71 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
|
||||
* on the resource validate list unless it's already there.
|
||||
* vmw_cmd_res_reloc_add - Add a resource to a software context's
|
||||
* relocation- and validation lists.
|
||||
*
|
||||
* @dev_priv: Pointer to a device private structure.
|
||||
* @dev_priv: Pointer to a struct vmw_private identifying the device.
|
||||
* @sw_context: Pointer to the software context.
|
||||
* @res_type: Resource type.
|
||||
* @converter: User-space visisble type specific information.
|
||||
* @id: user-space resource id handle.
|
||||
* @id_loc: Pointer to the location in the command buffer currently being
|
||||
* parsed from where the user-space resource id handle is located.
|
||||
* @p_val: Pointer to pointer to resource validalidation node. Populated
|
||||
* on exit.
|
||||
* @id_loc: Pointer to where the id that needs translation is located.
|
||||
* @res: Valid pointer to a struct vmw_resource.
|
||||
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node
|
||||
* used for this resource is returned here.
|
||||
*/
|
||||
static int
|
||||
vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
enum vmw_res_type res_type,
|
||||
const struct vmw_user_resource_conv *converter,
|
||||
uint32_t id,
|
||||
uint32_t *id_loc,
|
||||
struct vmw_resource_val_node **p_val)
|
||||
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
enum vmw_res_type res_type,
|
||||
uint32_t *id_loc,
|
||||
struct vmw_resource *res,
|
||||
struct vmw_resource_val_node **p_val)
|
||||
{
|
||||
struct vmw_res_cache_entry *rcache =
|
||||
&sw_context->res_cache[res_type];
|
||||
struct vmw_resource *res;
|
||||
struct vmw_resource_val_node *node;
|
||||
int ret;
|
||||
struct vmw_resource_val_node *node;
|
||||
|
||||
if (id == SVGA3D_INVALID_ID) {
|
||||
if (p_val)
|
||||
*p_val = NULL;
|
||||
if (res_type == vmw_res_context) {
|
||||
DRM_ERROR("Illegal context invalid id.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fastpath in case of repeated commands referencing the same
|
||||
* resource
|
||||
*/
|
||||
|
||||
if (likely(rcache->valid && id == rcache->handle)) {
|
||||
const struct vmw_resource *res = rcache->res;
|
||||
|
||||
rcache->node->first_usage = false;
|
||||
if (p_val)
|
||||
*p_val = rcache->node;
|
||||
|
||||
return vmw_resource_relocation_add
|
||||
(&sw_context->res_relocations, res,
|
||||
id_loc - sw_context->buf_start);
|
||||
}
|
||||
|
||||
ret = vmw_user_resource_lookup_handle(dev_priv,
|
||||
sw_context->fp->tfile,
|
||||
id,
|
||||
converter,
|
||||
&res);
|
||||
if (unlikely(ret != 0)) {
|
||||
DRM_ERROR("Could not find or use resource 0x%08x.\n",
|
||||
(unsigned) id);
|
||||
dump_stack();
|
||||
return ret;
|
||||
}
|
||||
|
||||
rcache->valid = true;
|
||||
rcache->res = res;
|
||||
rcache->handle = id;
|
||||
|
||||
*p_val = NULL;
|
||||
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
|
||||
res,
|
||||
id_loc - sw_context->buf_start);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_reloc;
|
||||
goto out_err;
|
||||
|
||||
ret = vmw_resource_val_add(sw_context, res, &node);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_reloc;
|
||||
goto out_err;
|
||||
|
||||
rcache->node = node;
|
||||
if (p_val)
|
||||
*p_val = node;
|
||||
if (res_type == vmw_res_context && dev_priv->has_mob &&
|
||||
node->first_usage) {
|
||||
|
||||
/*
|
||||
* Put contexts first on the list to be able to exit
|
||||
* list traversal for contexts early.
|
||||
*/
|
||||
list_del(&node->head);
|
||||
list_add(&node->head, &sw_context->resource_list);
|
||||
|
||||
if (dev_priv->has_mob && node->first_usage &&
|
||||
res_type == vmw_res_context) {
|
||||
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_reloc;
|
||||
goto out_err;
|
||||
node->staged_bindings =
|
||||
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
|
||||
if (node->staged_bindings == NULL) {
|
||||
DRM_ERROR("Failed to allocate context binding "
|
||||
"information.\n");
|
||||
goto out_no_reloc;
|
||||
goto out_err;
|
||||
}
|
||||
INIT_LIST_HEAD(&node->staged_bindings->list);
|
||||
}
|
||||
|
||||
vmw_resource_unreference(&res);
|
||||
return 0;
|
||||
|
||||
out_no_reloc:
|
||||
BUG_ON(sw_context->error_resource != NULL);
|
||||
sw_context->error_resource = res;
|
||||
if (p_val)
|
||||
*p_val = node;
|
||||
|
||||
out_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_cmd_res_check - Check that a resource is present and if so, put it
|
||||
* on the resource validate list unless it's already there.
|
||||
|
@ -554,8 +508,71 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
|
|||
uint32_t *id_loc,
|
||||
struct vmw_resource_val_node **p_val)
|
||||
{
|
||||
return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
|
||||
converter, *id_loc, id_loc, p_val);
|
||||
struct vmw_res_cache_entry *rcache =
|
||||
&sw_context->res_cache[res_type];
|
||||
struct vmw_resource *res;
|
||||
struct vmw_resource_val_node *node;
|
||||
int ret;
|
||||
|
||||
if (*id_loc == SVGA3D_INVALID_ID) {
|
||||
if (p_val)
|
||||
*p_val = NULL;
|
||||
if (res_type == vmw_res_context) {
|
||||
DRM_ERROR("Illegal context invalid id.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fastpath in case of repeated commands referencing the same
|
||||
* resource
|
||||
*/
|
||||
|
||||
if (likely(rcache->valid && *id_loc == rcache->handle)) {
|
||||
const struct vmw_resource *res = rcache->res;
|
||||
|
||||
rcache->node->first_usage = false;
|
||||
if (p_val)
|
||||
*p_val = rcache->node;
|
||||
|
||||
return vmw_resource_relocation_add
|
||||
(&sw_context->res_relocations, res,
|
||||
id_loc - sw_context->buf_start);
|
||||
}
|
||||
|
||||
ret = vmw_user_resource_lookup_handle(dev_priv,
|
||||
sw_context->fp->tfile,
|
||||
*id_loc,
|
||||
converter,
|
||||
&res);
|
||||
if (unlikely(ret != 0)) {
|
||||
DRM_ERROR("Could not find or use resource 0x%08x.\n",
|
||||
(unsigned) *id_loc);
|
||||
dump_stack();
|
||||
return ret;
|
||||
}
|
||||
|
||||
rcache->valid = true;
|
||||
rcache->res = res;
|
||||
rcache->handle = *id_loc;
|
||||
|
||||
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
|
||||
res, &node);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_reloc;
|
||||
|
||||
rcache->node = node;
|
||||
if (p_val)
|
||||
*p_val = node;
|
||||
vmw_resource_unreference(&res);
|
||||
return 0;
|
||||
|
||||
out_no_reloc:
|
||||
BUG_ON(sw_context->error_resource != NULL);
|
||||
sw_context->error_resource = res;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -572,8 +589,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
|
|||
int ret;
|
||||
|
||||
list_for_each_entry(val, &sw_context->resource_list, head) {
|
||||
if (likely(!val->staged_bindings))
|
||||
continue;
|
||||
if (unlikely(!val->staged_bindings))
|
||||
break;
|
||||
|
||||
ret = vmw_context_rebind_all(val->res);
|
||||
if (unlikely(ret != 0)) {
|
||||
|
@ -1626,13 +1643,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
|
|||
} *cmd;
|
||||
int ret;
|
||||
size_t size;
|
||||
struct vmw_resource_val_node *val;
|
||||
|
||||
cmd = container_of(header, struct vmw_shader_define_cmd,
|
||||
header);
|
||||
|
||||
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
|
||||
user_context_converter, &cmd->body.cid,
|
||||
NULL);
|
||||
&val);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
|
@ -1640,11 +1658,11 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
|
|||
return 0;
|
||||
|
||||
size = cmd->header.size - sizeof(cmd->body);
|
||||
ret = vmw_compat_shader_add(sw_context->fp->shman,
|
||||
ret = vmw_compat_shader_add(dev_priv,
|
||||
vmw_context_res_man(val->res),
|
||||
cmd->body.shid, cmd + 1,
|
||||
cmd->body.type, size,
|
||||
sw_context->fp->tfile,
|
||||
&sw_context->staged_shaders);
|
||||
&sw_context->staged_cmd_res);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
|
@ -1672,23 +1690,24 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
|
|||
SVGA3dCmdDestroyShader body;
|
||||
} *cmd;
|
||||
int ret;
|
||||
struct vmw_resource_val_node *val;
|
||||
|
||||
cmd = container_of(header, struct vmw_shader_destroy_cmd,
|
||||
header);
|
||||
|
||||
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
|
||||
user_context_converter, &cmd->body.cid,
|
||||
NULL);
|
||||
&val);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
if (unlikely(!dev_priv->has_mob))
|
||||
return 0;
|
||||
|
||||
ret = vmw_compat_shader_remove(sw_context->fp->shman,
|
||||
ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
|
||||
cmd->body.shid,
|
||||
cmd->body.type,
|
||||
&sw_context->staged_shaders);
|
||||
&sw_context->staged_cmd_res);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
|
@ -1715,7 +1734,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
|
|||
SVGA3dCmdHeader header;
|
||||
SVGA3dCmdSetShader body;
|
||||
} *cmd;
|
||||
struct vmw_resource_val_node *ctx_node;
|
||||
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
|
||||
struct vmw_ctx_bindinfo bi;
|
||||
struct vmw_resource *res = NULL;
|
||||
int ret;
|
||||
|
||||
cmd = container_of(header, struct vmw_set_shader_cmd,
|
||||
|
@ -1727,32 +1748,40 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
|
|||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
if (dev_priv->has_mob) {
|
||||
struct vmw_ctx_bindinfo bi;
|
||||
struct vmw_resource_val_node *res_node;
|
||||
u32 shid = cmd->body.shid;
|
||||
if (!dev_priv->has_mob)
|
||||
return 0;
|
||||
|
||||
if (shid != SVGA3D_INVALID_ID)
|
||||
(void) vmw_compat_shader_lookup(sw_context->fp->shman,
|
||||
cmd->body.type,
|
||||
&shid);
|
||||
if (cmd->body.shid != SVGA3D_INVALID_ID) {
|
||||
res = vmw_compat_shader_lookup
|
||||
(vmw_context_res_man(ctx_node->res),
|
||||
cmd->body.shid,
|
||||
cmd->body.type);
|
||||
|
||||
ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
|
||||
vmw_res_shader,
|
||||
user_shader_converter,
|
||||
shid,
|
||||
&cmd->body.shid, &res_node);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
bi.ctx = ctx_node->res;
|
||||
bi.res = res_node ? res_node->res : NULL;
|
||||
bi.bt = vmw_ctx_binding_shader;
|
||||
bi.i1.shader_type = cmd->body.type;
|
||||
return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
|
||||
if (!IS_ERR(res)) {
|
||||
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
|
||||
vmw_res_shader,
|
||||
&cmd->body.shid, res,
|
||||
&res_node);
|
||||
vmw_resource_unreference(&res);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
if (!res_node) {
|
||||
ret = vmw_cmd_res_check(dev_priv, sw_context,
|
||||
vmw_res_shader,
|
||||
user_shader_converter,
|
||||
&cmd->body.shid, &res_node);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
}
|
||||
|
||||
bi.ctx = ctx_node->res;
|
||||
bi.res = res_node ? res_node->res : NULL;
|
||||
bi.bt = vmw_ctx_binding_shader;
|
||||
bi.i1.shader_type = cmd->body.type;
|
||||
return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2394,6 +2423,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
int vmw_execbuf_process(struct drm_file *file_priv,
|
||||
struct vmw_private *dev_priv,
|
||||
void __user *user_commands,
|
||||
|
@ -2453,7 +2484,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
|
|||
goto out_unlock;
|
||||
sw_context->res_ht_initialized = true;
|
||||
}
|
||||
INIT_LIST_HEAD(&sw_context->staged_shaders);
|
||||
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
|
||||
|
||||
INIT_LIST_HEAD(&resource_list);
|
||||
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
|
||||
|
@ -2548,8 +2579,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
|
|||
}
|
||||
|
||||
list_splice_init(&sw_context->resource_list, &resource_list);
|
||||
vmw_compat_shaders_commit(sw_context->fp->shman,
|
||||
&sw_context->staged_shaders);
|
||||
vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
|
||||
mutex_unlock(&dev_priv->cmdbuf_mutex);
|
||||
|
||||
/*
|
||||
|
@ -2576,8 +2606,7 @@ out_unlock:
|
|||
list_splice_init(&sw_context->resource_list, &resource_list);
|
||||
error_resource = sw_context->error_resource;
|
||||
sw_context->error_resource = NULL;
|
||||
vmw_compat_shaders_revert(sw_context->fp->shman,
|
||||
&sw_context->staged_shaders);
|
||||
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
|
||||
mutex_unlock(&dev_priv->cmdbuf_mutex);
|
||||
|
||||
/*
|
||||
|
|
|
@ -29,8 +29,6 @@
|
|||
#include "vmwgfx_resource_priv.h"
|
||||
#include "ttm/ttm_placement.h"
|
||||
|
||||
#define VMW_COMPAT_SHADER_HT_ORDER 12
|
||||
|
||||
struct vmw_shader {
|
||||
struct vmw_resource res;
|
||||
SVGA3dShaderType type;
|
||||
|
@ -42,49 +40,8 @@ struct vmw_user_shader {
|
|||
struct vmw_shader shader;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum vmw_compat_shader_state - Staging state for compat shaders
|
||||
*/
|
||||
enum vmw_compat_shader_state {
|
||||
VMW_COMPAT_COMMITED,
|
||||
VMW_COMPAT_ADD,
|
||||
VMW_COMPAT_DEL
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vmw_compat_shader - Metadata for compat shaders.
|
||||
*
|
||||
* @handle: The TTM handle of the guest backed shader.
|
||||
* @tfile: The struct ttm_object_file the guest backed shader is registered
|
||||
* with.
|
||||
* @hash: Hash item for lookup.
|
||||
* @head: List head for staging lists or the compat shader manager list.
|
||||
* @state: Staging state.
|
||||
*
|
||||
* The structure is protected by the cmdbuf lock.
|
||||
*/
|
||||
struct vmw_compat_shader {
|
||||
u32 handle;
|
||||
struct ttm_object_file *tfile;
|
||||
struct drm_hash_item hash;
|
||||
struct list_head head;
|
||||
enum vmw_compat_shader_state state;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vmw_compat_shader_manager - Compat shader manager.
|
||||
*
|
||||
* @shaders: Hash table containing staged and commited compat shaders
|
||||
* @list: List of commited shaders.
|
||||
* @dev_priv: Pointer to a device private structure.
|
||||
*
|
||||
* @shaders and @list are protected by the cmdbuf mutex for now.
|
||||
*/
|
||||
struct vmw_compat_shader_manager {
|
||||
struct drm_open_hash shaders;
|
||||
struct list_head list;
|
||||
struct vmw_private *dev_priv;
|
||||
};
|
||||
static uint64_t vmw_user_shader_size;
|
||||
static uint64_t vmw_shader_size;
|
||||
|
||||
static void vmw_user_shader_free(struct vmw_resource *res);
|
||||
static struct vmw_resource *
|
||||
|
@ -98,8 +55,6 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
|
|||
struct ttm_validate_buffer *val_buf);
|
||||
static int vmw_gb_shader_destroy(struct vmw_resource *res);
|
||||
|
||||
static uint64_t vmw_user_shader_size;
|
||||
|
||||
static const struct vmw_user_resource_conv user_shader_conv = {
|
||||
.object_type = VMW_RES_SHADER,
|
||||
.base_obj_to_res = vmw_user_shader_base_to_res,
|
||||
|
@ -347,6 +302,16 @@ static void vmw_user_shader_free(struct vmw_resource *res)
|
|||
vmw_user_shader_size);
|
||||
}
|
||||
|
||||
static void vmw_shader_free(struct vmw_resource *res)
|
||||
{
|
||||
struct vmw_shader *shader = vmw_res_to_shader(res);
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
|
||||
kfree(shader);
|
||||
ttm_mem_global_free(vmw_mem_glob(dev_priv),
|
||||
vmw_shader_size);
|
||||
}
|
||||
|
||||
/**
|
||||
* This function is called when user space has no more references on the
|
||||
* base object. It releases the base-object's reference on the resource object.
|
||||
|
@ -371,13 +336,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
|
|||
TTM_REF_USAGE);
|
||||
}
|
||||
|
||||
static int vmw_shader_alloc(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buffer,
|
||||
size_t shader_size,
|
||||
size_t offset,
|
||||
SVGA3dShaderType shader_type,
|
||||
struct ttm_object_file *tfile,
|
||||
u32 *handle)
|
||||
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buffer,
|
||||
size_t shader_size,
|
||||
size_t offset,
|
||||
SVGA3dShaderType shader_type,
|
||||
struct ttm_object_file *tfile,
|
||||
u32 *handle)
|
||||
{
|
||||
struct vmw_user_shader *ushader;
|
||||
struct vmw_resource *res, *tmp;
|
||||
|
@ -442,6 +407,56 @@ out:
|
|||
}
|
||||
|
||||
|
||||
struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buffer,
|
||||
size_t shader_size,
|
||||
size_t offset,
|
||||
SVGA3dShaderType shader_type)
|
||||
{
|
||||
struct vmw_shader *shader;
|
||||
struct vmw_resource *res;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Approximate idr memory usage with 128 bytes. It will be limited
|
||||
* by maximum number_of shaders anyway.
|
||||
*/
|
||||
if (unlikely(vmw_shader_size == 0))
|
||||
vmw_shader_size =
|
||||
ttm_round_pot(sizeof(struct vmw_shader)) + 128;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
|
||||
vmw_shader_size,
|
||||
false, true);
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for shader "
|
||||
"creation.\n");
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
|
||||
if (unlikely(shader == NULL)) {
|
||||
ttm_mem_global_free(vmw_mem_glob(dev_priv),
|
||||
vmw_shader_size);
|
||||
ret = -ENOMEM;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
res = &shader->res;
|
||||
|
||||
/*
|
||||
* From here on, the destructor takes over resource freeing.
|
||||
*/
|
||||
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
|
||||
offset, shader_type, buffer,
|
||||
vmw_shader_free);
|
||||
|
||||
out_err:
|
||||
return ret ? ERR_PTR(ret) : res;
|
||||
}
|
||||
|
||||
|
||||
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
|
@ -490,8 +505,8 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
|
|||
if (unlikely(ret != 0))
|
||||
goto out_bad_arg;
|
||||
|
||||
ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
|
||||
shader_type, tfile, &arg->shader_handle);
|
||||
ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
|
||||
shader_type, tfile, &arg->shader_handle);
|
||||
|
||||
ttm_read_unlock(&dev_priv->reservation_sem);
|
||||
out_bad_arg:
|
||||
|
@ -500,202 +515,83 @@ out_bad_arg:
|
|||
}
|
||||
|
||||
/**
|
||||
* vmw_compat_shader_lookup - Look up a compat shader
|
||||
* vmw_compat_shader_id_ok - Check whether a compat shader user key and
|
||||
* shader type are within valid bounds.
|
||||
*
|
||||
* @man: Pointer to the compat shader manager.
|
||||
* @shader_type: The shader type, that combined with the user_key identifies
|
||||
* the shader.
|
||||
* @user_key: On entry, this should be a pointer to the user_key.
|
||||
* On successful exit, it will contain the guest-backed shader's TTM handle.
|
||||
* @user_key: User space id of the shader.
|
||||
* @shader_type: Shader type.
|
||||
*
|
||||
* Returns 0 on success. Non-zero on failure, in which case the value pointed
|
||||
* to by @user_key is unmodified.
|
||||
* Returns true if valid false if not.
|
||||
*/
|
||||
int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
|
||||
SVGA3dShaderType shader_type,
|
||||
u32 *user_key)
|
||||
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
|
||||
{
|
||||
struct drm_hash_item *hash;
|
||||
int ret;
|
||||
unsigned long key = *user_key | (shader_type << 24);
|
||||
|
||||
ret = drm_ht_find_item(&man->shaders, key, &hash);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
*user_key = drm_hash_entry(hash, struct vmw_compat_shader,
|
||||
hash)->handle;
|
||||
|
||||
return 0;
|
||||
return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_compat_shader_free - Free a compat shader.
|
||||
* vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
|
||||
*
|
||||
* @man: Pointer to the compat shader manager.
|
||||
* @entry: Pointer to a struct vmw_compat_shader.
|
||||
* @user_key: User space id of the shader.
|
||||
* @shader_type: Shader type.
|
||||
*
|
||||
* Frees a struct vmw_compat_shder entry and drops its reference to the
|
||||
* guest backed shader.
|
||||
* Returns a hash key suitable for a command buffer managed resource
|
||||
* manager hash table.
|
||||
*/
|
||||
static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
|
||||
struct vmw_compat_shader *entry)
|
||||
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
|
||||
{
|
||||
list_del(&entry->head);
|
||||
WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
|
||||
WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
|
||||
TTM_REF_USAGE));
|
||||
kfree(entry);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_compat_shaders_commit - Commit a list of compat shader actions.
|
||||
*
|
||||
* @man: Pointer to the compat shader manager.
|
||||
* @list: Caller's list of compat shader actions.
|
||||
*
|
||||
* This function commits a list of compat shader additions or removals.
|
||||
* It is typically called when the execbuf ioctl call triggering these
|
||||
* actions has commited the fifo contents to the device.
|
||||
*/
|
||||
void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
|
||||
struct list_head *list)
|
||||
{
|
||||
struct vmw_compat_shader *entry, *next;
|
||||
|
||||
list_for_each_entry_safe(entry, next, list, head) {
|
||||
list_del(&entry->head);
|
||||
switch (entry->state) {
|
||||
case VMW_COMPAT_ADD:
|
||||
entry->state = VMW_COMPAT_COMMITED;
|
||||
list_add_tail(&entry->head, &man->list);
|
||||
break;
|
||||
case VMW_COMPAT_DEL:
|
||||
ttm_ref_object_base_unref(entry->tfile, entry->handle,
|
||||
TTM_REF_USAGE);
|
||||
kfree(entry);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_compat_shaders_revert - Revert a list of compat shader actions
|
||||
*
|
||||
* @man: Pointer to the compat shader manager.
|
||||
* @list: Caller's list of compat shader actions.
|
||||
*
|
||||
* This function reverts a list of compat shader additions or removals.
|
||||
* It is typically called when the execbuf ioctl call triggering these
|
||||
* actions failed for some reason, and the command stream was never
|
||||
* submitted.
|
||||
*/
|
||||
void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
|
||||
struct list_head *list)
|
||||
{
|
||||
struct vmw_compat_shader *entry, *next;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry_safe(entry, next, list, head) {
|
||||
switch (entry->state) {
|
||||
case VMW_COMPAT_ADD:
|
||||
vmw_compat_shader_free(man, entry);
|
||||
break;
|
||||
case VMW_COMPAT_DEL:
|
||||
ret = drm_ht_insert_item(&man->shaders, &entry->hash);
|
||||
list_del(&entry->head);
|
||||
list_add_tail(&entry->head, &man->list);
|
||||
entry->state = VMW_COMPAT_COMMITED;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
}
|
||||
return user_key | (shader_type << 20);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_compat_shader_remove - Stage a compat shader for removal.
|
||||
*
|
||||
* @man: Pointer to the compat shader manager
|
||||
* @man: Pointer to the compat shader manager identifying the shader namespace.
|
||||
* @user_key: The key that is used to identify the shader. The key is
|
||||
* unique to the shader type.
|
||||
* @shader_type: Shader type.
|
||||
* @list: Caller's list of staged shader actions.
|
||||
*
|
||||
* This function stages a compat shader for removal and removes the key from
|
||||
* the shader manager's hash table. If the shader was previously only staged
|
||||
* for addition it is completely removed (But the execbuf code may keep a
|
||||
* reference if it was bound to a context between addition and removal). If
|
||||
* it was previously commited to the manager, it is staged for removal.
|
||||
* @list: Caller's list of staged command buffer resource actions.
|
||||
*/
|
||||
int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
|
||||
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
|
||||
u32 user_key, SVGA3dShaderType shader_type,
|
||||
struct list_head *list)
|
||||
{
|
||||
struct vmw_compat_shader *entry;
|
||||
struct drm_hash_item *hash;
|
||||
int ret;
|
||||
|
||||
ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
|
||||
&hash);
|
||||
if (likely(ret != 0))
|
||||
if (!vmw_compat_shader_id_ok(user_key, shader_type))
|
||||
return -EINVAL;
|
||||
|
||||
entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
|
||||
|
||||
switch (entry->state) {
|
||||
case VMW_COMPAT_ADD:
|
||||
vmw_compat_shader_free(man, entry);
|
||||
break;
|
||||
case VMW_COMPAT_COMMITED:
|
||||
(void) drm_ht_remove_item(&man->shaders, &entry->hash);
|
||||
list_del(&entry->head);
|
||||
entry->state = VMW_COMPAT_DEL;
|
||||
list_add_tail(&entry->head, list);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
|
||||
vmw_compat_shader_key(user_key,
|
||||
shader_type),
|
||||
list);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_compat_shader_add - Create a compat shader and add the
|
||||
* key to the manager
|
||||
* vmw_compat_shader_add - Create a compat shader and stage it for addition
|
||||
* as a command buffer managed resource.
|
||||
*
|
||||
* @man: Pointer to the compat shader manager
|
||||
* @man: Pointer to the compat shader manager identifying the shader namespace.
|
||||
* @user_key: The key that is used to identify the shader. The key is
|
||||
* unique to the shader type.
|
||||
* @bytecode: Pointer to the bytecode of the shader.
|
||||
* @shader_type: Shader type.
|
||||
* @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
|
||||
* to be created with.
|
||||
* @list: Caller's list of staged shader actions.
|
||||
* @list: Caller's list of staged command buffer resource actions.
|
||||
*
|
||||
* Note that only the key is added to the shader manager's hash table.
|
||||
* The shader is not yet added to the shader manager's list of shaders.
|
||||
*/
|
||||
int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
|
||||
int vmw_compat_shader_add(struct vmw_private *dev_priv,
|
||||
struct vmw_cmdbuf_res_manager *man,
|
||||
u32 user_key, const void *bytecode,
|
||||
SVGA3dShaderType shader_type,
|
||||
size_t size,
|
||||
struct ttm_object_file *tfile,
|
||||
struct list_head *list)
|
||||
{
|
||||
struct vmw_dma_buffer *buf;
|
||||
struct ttm_bo_kmap_obj map;
|
||||
bool is_iomem;
|
||||
struct vmw_compat_shader *compat;
|
||||
u32 handle;
|
||||
int ret;
|
||||
struct vmw_resource *res;
|
||||
|
||||
if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
|
||||
if (!vmw_compat_shader_id_ok(user_key, shader_type))
|
||||
return -EINVAL;
|
||||
|
||||
/* Allocate and pin a DMA buffer */
|
||||
|
@ -703,7 +599,7 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
|
|||
if (unlikely(buf == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
|
||||
ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
|
||||
true, vmw_dmabuf_bo_free);
|
||||
if (unlikely(ret != 0))
|
||||
goto out;
|
||||
|
@ -728,84 +624,40 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
|
|||
WARN_ON(ret != 0);
|
||||
ttm_bo_unreserve(&buf->base);
|
||||
|
||||
/* Create a guest-backed shader container backed by the dma buffer */
|
||||
ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
|
||||
tfile, &handle);
|
||||
vmw_dmabuf_unreference(&buf);
|
||||
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
|
||||
if (unlikely(ret != 0))
|
||||
goto no_reserve;
|
||||
/*
|
||||
* Create a compat shader structure and stage it for insertion
|
||||
* in the manager
|
||||
*/
|
||||
compat = kzalloc(sizeof(*compat), GFP_KERNEL);
|
||||
if (compat == NULL)
|
||||
goto no_compat;
|
||||
|
||||
compat->hash.key = user_key | (shader_type << 24);
|
||||
ret = drm_ht_insert_item(&man->shaders, &compat->hash);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_invalid_key;
|
||||
|
||||
compat->state = VMW_COMPAT_ADD;
|
||||
compat->handle = handle;
|
||||
compat->tfile = tfile;
|
||||
list_add_tail(&compat->head, list);
|
||||
|
||||
return 0;
|
||||
|
||||
out_invalid_key:
|
||||
kfree(compat);
|
||||
no_compat:
|
||||
ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
|
||||
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
|
||||
vmw_compat_shader_key(user_key, shader_type),
|
||||
res, list);
|
||||
vmw_resource_unreference(&res);
|
||||
no_reserve:
|
||||
vmw_dmabuf_unreference(&buf);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_compat_shader_man_create - Create a compat shader manager
|
||||
* vmw_compat_shader_lookup - Look up a compat shader
|
||||
*
|
||||
* @dev_priv: Pointer to a device private structure.
|
||||
* @man: Pointer to the command buffer managed resource manager identifying
|
||||
* the shader namespace.
|
||||
* @user_key: The user space id of the shader.
|
||||
* @shader_type: The shader type.
|
||||
*
|
||||
* Typically done at file open time. If successful returns a pointer to a
|
||||
* compat shader manager. Otherwise returns an error pointer.
|
||||
* Returns a refcounted pointer to a struct vmw_resource if the shader was
|
||||
* found. An error pointer otherwise.
|
||||
*/
|
||||
struct vmw_compat_shader_manager *
|
||||
vmw_compat_shader_man_create(struct vmw_private *dev_priv)
|
||||
struct vmw_resource *
|
||||
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
|
||||
u32 user_key,
|
||||
SVGA3dShaderType shader_type)
|
||||
{
|
||||
struct vmw_compat_shader_manager *man;
|
||||
int ret;
|
||||
if (!vmw_compat_shader_id_ok(user_key, shader_type))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
man = kzalloc(sizeof(*man), GFP_KERNEL);
|
||||
if (man == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
man->dev_priv = dev_priv;
|
||||
INIT_LIST_HEAD(&man->list);
|
||||
ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
|
||||
if (ret == 0)
|
||||
return man;
|
||||
|
||||
kfree(man);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_compat_shader_man_destroy - Destroy a compat shader manager
|
||||
*
|
||||
* @man: Pointer to the shader manager to destroy.
|
||||
*
|
||||
* Typically done at file close time.
|
||||
*/
|
||||
void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
|
||||
{
|
||||
struct vmw_compat_shader *entry, *next;
|
||||
|
||||
mutex_lock(&man->dev_priv->cmdbuf_mutex);
|
||||
list_for_each_entry_safe(entry, next, &man->list, head)
|
||||
vmw_compat_shader_free(man, entry);
|
||||
|
||||
mutex_unlock(&man->dev_priv->cmdbuf_mutex);
|
||||
kfree(man);
|
||||
return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
|
||||
vmw_compat_shader_key(user_key,
|
||||
shader_type));
|
||||
}
|
||||
|
|