habanalabs: verify that kernel CB is destroyed only once
Remove the distinction between user CB and kernel CB, and verify for both that they are not destroyed more than once. As a kernel CB might be taken from the pre-allocated CB pool, the handle-destroyed indication needs to be cleared when a CB is returned to the pool.

Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
parent 20faaeec37
commit e2a079a206
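The fix boils down to a one-shot guard on each CB: hl_cb_destroy() atomically flips the is_handle_destroyed flag from 0 to 1 and rejects any later attempt, while cb_do_release() clears the flag before a pooled CB goes back to the pool so the recycled object can be destroyed again later. Below is a minimal userspace sketch of that pattern using C11 stdatomic rather than the kernel's atomic_cmpxchg(); the struct and helper names (fake_cb, cb_destroy_once, cb_return_to_pool) are invented for illustration and are not part of the driver.

```c
/*
 * Userspace sketch of the one-shot destroy guard: the first caller to flip
 * is_handle_destroyed from 0 to 1 wins, repeat attempts fail, and returning
 * a pooled object clears the flag so it can be reused and destroyed again.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_cb {
	atomic_int is_handle_destroyed;	/* 0 = live, 1 = already destroyed */
	int is_pool;			/* pooled CBs are recycled, not freed */
};

/* Returns 0 on the first destroy, -1 if the handle was already destroyed. */
static int cb_destroy_once(struct fake_cb *cb)
{
	int expected = 0;

	/* Only one caller can move the flag from 0 to 1. */
	if (!atomic_compare_exchange_strong(&cb->is_handle_destroyed,
					    &expected, 1))
		return -1;
	return 0;
}

/* Mirrors cb_do_release(): a pooled CB must be destroyable after reuse. */
static void cb_return_to_pool(struct fake_cb *cb)
{
	if (cb->is_pool)
		atomic_store(&cb->is_handle_destroyed, 0);
}

int main(void)
{
	struct fake_cb cb = { .is_handle_destroyed = 0, .is_pool = 1 };

	printf("first destroy:  %d\n", cb_destroy_once(&cb));	/* 0  */
	printf("second destroy: %d\n", cb_destroy_once(&cb));	/* -1 */

	cb_return_to_pool(&cb);					/* flag cleared */
	printf("after reuse:    %d\n", cb_destroy_once(&cb));	/* 0  */
	return 0;
}
```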
@@ -88,6 +88,7 @@ static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
 {
 	if (cb->is_pool) {
+		atomic_set(&cb->is_handle_destroyed, 0);
 		spin_lock(&hdev->cb_pool_lock);
 		list_add(&cb->pool_list, &hdev->cb_pool);
 		spin_unlock(&hdev->cb_pool_lock);
@@ -301,28 +302,23 @@ int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
 	struct hl_cb *cb;
 	int rc;
 
-	/* Make sure that a CB handle isn't destroyed by user more than once */
-	if (!mmg->is_kernel_mem_mgr) {
-		cb = hl_cb_get(mmg, cb_handle);
-		if (!cb) {
-			dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
-				cb_handle);
-			rc = -EINVAL;
-			goto out;
-		}
+	cb = hl_cb_get(mmg, cb_handle);
+	if (!cb) {
+		dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
+			cb_handle);
+		return -EINVAL;
+	}
 
-		rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
-		hl_cb_put(cb);
-		if (rc) {
-			dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
-				cb_handle);
-			rc = -EINVAL;
-			goto out;
-		}
+	/* Make sure that CB handle isn't destroyed more than once */
+	rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
+	hl_cb_put(cb);
+	if (rc) {
+		dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
+			cb_handle);
+		return -EINVAL;
 	}
 
 	rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
-out:
 	if (rc < 0)
 		return rc; /* Invalid handle */
 
@@ -855,7 +855,7 @@ static int device_early_init(struct hl_device *hdev)
 	if (rc)
 		goto free_chip_info;
 
-	hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr, 1);
+	hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
 
 	hdev->reset_wq = create_singlethread_workqueue("hl_device_reset");
 	if (!hdev->reset_wq) {
@@ -876,13 +876,11 @@ struct hl_mmap_mem_buf;
  * @dev: back pointer to the owning device
  * @lock: protects handles
  * @handles: an idr holding all active handles to the memory buffers in the system.
- * @is_kernel_mem_mgr: indicate whether the memory manager is the per-device kernel memory manager
  */
 struct hl_mem_mgr {
 	struct device *dev;
 	spinlock_t lock;
 	struct idr handles;
-	u8 is_kernel_mem_mgr;
 };
 
 /**
@@ -3824,7 +3822,7 @@ __printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
 char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
 const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
 
-void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg, u8 is_kernel_mem_mgr);
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
 void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
 int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
 		    void *args);
@@ -164,7 +164,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
 	nonseekable_open(inode, filp);
 
 	hl_ctx_mgr_init(&hpriv->ctx_mgr);
-	hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr, 0);
+	hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr);
 
 	hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
 
@@ -308,16 +308,14 @@ put_mem:
  *
  * @dev: owner device pointer
  * @mmg: structure to initialize
- * @is_kernel_mem_mgr: indicate whether the memory manager is the per-device kernel memory manager
  *
  * Initialize an instance of unified memory manager
  */
-void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg, u8 is_kernel_mem_mgr)
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
 {
 	mmg->dev = dev;
 	spin_lock_init(&mmg->lock);
 	idr_init(&mmg->handles);
-	mmg->is_kernel_mem_mgr = is_kernel_mem_mgr;
 }
 
 /**