// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci-p2pdma.h>

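/* Required for the dma_buf_* symbols, which are exported in the DMA_BUF namespace */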
MODULE_IMPORT_NS(DMA_BUF);

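/* Set to 1 to have print_va_list_locked() dump the va block lists via dev_dbg */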
#define HL_MMU_DEBUG	0

/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
#define DRAM_POOL_PAGE_SIZE	SZ_8M

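/* Forward declaration - the function is defined further down in this file */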
static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
			struct hl_mem_in *args, u64 *handle);

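/*
 * set_alloc_page_size() - pick the page size for a device memory allocation.
 * @hdev: habanalabs device structure.
 * @args: host parameters, possibly carrying a user-requested page size.
 * @page_size: resulting page size.
 *
 * Use the user's page size if the ASIC supports user-set page sizes and the
 * value is a non-zero power of 2; otherwise fall back to the default device
 * memory allocation page size.
 */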
static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 psize;

	/*
	 * For an ASIC that supports a user-set allocation page size, honor the
	 * user's choice only if it is not 0 (0 means take the default page size).
	 */
	if (prop->supports_user_set_page_size && args->alloc.page_size) {
		psize = args->alloc.page_size;

		if (!is_power_of_2(psize)) {
			dev_err(hdev->dev, "user page size (%#llx) is not power of 2\n", psize);
			return -EINVAL;
		}
	} else {
		psize = prop->device_mem_alloc_default_page_size;
	}

	*page_size = psize;

	return 0;
}

/*
 * The va ranges in context object contain a list with the available chunks of
 * device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as result and a remainder to stay in the list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be created),
 * the chunks are merged.
 *
 * On finish, the list is checked to have only one chunk of all the relevant
 * virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */

/**
 * alloc_device_memory() - allocate device memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters containing the requested size.
 * @ret_handle: result handle.
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 'dram_page_size' pages.
 * - Return unique handle for later map/unmap/free.
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
				u32 *ret_handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u64 paddr = 0, total_size, num_pgs, i;
	u32 num_curr_pgs, page_size;
	bool contiguous;
	int handle, rc;

	num_curr_pgs = 0;

	rc = set_alloc_page_size(hdev, args, &page_size);
	if (rc)
		return rc;

	num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
	total_size = num_pgs * page_size;

	if (!total_size) {
		dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
		return -EINVAL;
	}

	contiguous = args->flags & HL_MEM_CONTIGUOUS;

	if (contiguous) {
		if (is_power_of_2(page_size))
			paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
							total_size, NULL, page_size);
		else
			paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
		if (!paddr) {
			dev_err(hdev->dev,
				"Cannot allocate %llu contiguous pages with total size of %llu\n",
				num_pgs, total_size);
			return -ENOMEM;
		}
	}

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack) {
		rc = -ENOMEM;
		goto pages_pack_err;
	}

	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
	phys_pg_pack->asid = ctx->asid;
	phys_pg_pack->npages = num_pgs;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_size;
	phys_pg_pack->flags = args->flags;
	phys_pg_pack->contiguous = contiguous;

	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto pages_arr_err;
	}

	if (phys_pg_pack->contiguous) {
		for (i = 0 ; i < num_pgs ; i++)
			phys_pg_pack->pages[i] = paddr + i * page_size;
	} else {
		for (i = 0 ; i < num_pgs ; i++) {
			if (is_power_of_2(page_size))
				phys_pg_pack->pages[i] =
					(uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
									page_size, NULL,
									page_size);
			else
				phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
									page_size);

			if (!phys_pg_pack->pages[i]) {
				dev_err(hdev->dev,
					"Cannot allocate device memory (out of memory)\n");
				rc = -ENOMEM;
				goto page_err;
			}

			num_curr_pgs++;
		}
	}

	spin_lock(&vm->idr_lock);
	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
				GFP_ATOMIC);
	spin_unlock(&vm->idr_lock);

	if (handle < 0) {
		dev_err(hdev->dev, "Failed to get handle for page\n");
		rc = -EFAULT;
		goto idr_err;
	}

	for (i = 0 ; i < num_pgs ; i++)
		kref_get(&vm->dram_pg_pool_refcount);

	phys_pg_pack->handle = handle;

	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

	*ret_handle = handle;

	return 0;

idr_err:
page_err:
	if (!phys_pg_pack->contiguous)
		for (i = 0 ; i < num_curr_pgs ; i++)
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
					page_size);

	kvfree(phys_pg_pack->pages);
pages_arr_err:
	kfree(phys_pg_pack);
pages_pack_err:
	if (contiguous)
		gen_pool_free(vm->dram_pg_pool, paddr, total_size);

	return rc;
}

/**
 * dma_map_host_va() - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure.
 * @addr: the host virtual address of the memory area.
 * @size: the size of the memory area.
 * @p_userptr: pointer to result userptr structure.
 *
 * This function does the following:
 * - Allocate userptr structure.
 * - Pin the given host memory using the userptr structure.
 * - Perform DMA mapping to have the DMA addresses of the pages.
 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
				struct hl_userptr **p_userptr)
{
	struct hl_userptr *userptr;
	int rc;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr) {
		rc = -ENOMEM;
		goto userptr_err;
	}

	rc = hl_pin_host_memory(hdev, addr, size, userptr);
	if (rc) {
		dev_err(hdev->dev, "Failed to pin host memory\n");
		goto pin_err;
	}

	userptr->dma_mapped = true;
	userptr->dir = DMA_BIDIRECTIONAL;
	userptr->vm_type = VM_TYPE_USERPTR;

	*p_userptr = userptr;

	rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto dma_map_err;
	}

	return 0;

dma_map_err:
	hl_unpin_host_memory(hdev, userptr);
pin_err:
	kfree(userptr);
userptr_err:

	return rc;
}

/**
 * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
 * @hdev: habanalabs device structure.
 * @userptr: userptr to free.
 *
 * This function does the following:
 * - Unpins the physical pages.
 * - Frees the userptr structure.
 */
static void dma_unmap_host_va(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	hl_unpin_host_memory(hdev, userptr);
	kfree(userptr);
}

/**
 * dram_pg_pool_do_release() - free DRAM pages pool
 * @ref: pointer to reference object.
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles.
 * - Frees the generic pool of DRAM physical pages.
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
	struct hl_vm *vm = container_of(ref, struct hl_vm,
			dram_pg_pool_refcount);

	/*
	 * free the idr here as only here we know for sure that there are no
	 * allocated physical pages and hence there are no handles in use
	 */
	idr_destroy(&vm->phys_pg_pack_handles);
	gen_pool_destroy(vm->dram_pg_pool);
}

/**
 * free_phys_pg_pack() - free physical page pack.
 * @hdev: habanalabs device structure.
 * @phys_pg_pack: physical page pack to free.
 *
 * This function does the following:
 * - For DRAM memory only
 *   - iterate over the pack, free each physical block structure by
 *     returning it to the general pool.
 * - Free the hl_vm_phys_pg_pack structure.
 */
static void free_phys_pg_pack(struct hl_device *hdev,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_vm *vm = &hdev->vm;
	u64 i;

	if (phys_pg_pack->created_from_userptr)
		goto end;

	if (phys_pg_pack->contiguous) {
		gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
				phys_pg_pack->total_size);

		for (i = 0; i < phys_pg_pack->npages ; i++)
			kref_put(&vm->dram_pg_pool_refcount,
					dram_pg_pool_do_release);
	} else {
		for (i = 0 ; i < phys_pg_pack->npages ; i++) {
			gen_pool_free(vm->dram_pg_pool,
					phys_pg_pack->pages[i],
					phys_pg_pack->page_size);
			kref_put(&vm->dram_pg_pool_refcount,
					dram_pg_pool_do_release);
		}
	}

end:
	kvfree(phys_pg_pack->pages);
	kfree(phys_pg_pack);
}

/**
 * free_device_memory() - free device memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters containing the memory handle to free.
 *
 * This function does the following:
 * - Free the device memory related to the given handle.
 */
static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle = args->free.handle;

	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (!phys_pg_pack) {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "free device memory failed, no match for handle %u\n", handle);
		return -EINVAL;
	}

	if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "handle %u is mapped, cannot free\n", handle);
		return -EINVAL;
	}

	if (phys_pg_pack->exporting_cnt) {
		spin_unlock(&vm->idr_lock);
		dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
		return -EINVAL;
	}

	/* The handle must be removed from the idr before the physical pages are
	 * freed, as the pool refcount is also what triggers the idr destroy.
	 */
	idr_remove(&vm->phys_pg_pack_handles, handle);
	spin_unlock(&vm->idr_lock);

	atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

	free_phys_pg_pack(hdev, phys_pg_pack);

	return 0;
}

/**
 * clear_va_list_locked() - free virtual addresses list.
 * @hdev: habanalabs device structure.
 * @va_list: list of virtual addresses to free.
 *
 * This function does the following:
 * - Iterate over the list and free each virtual addresses block.
 *
 * This function should be called only when va_list lock is taken.
 */
static void clear_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block, *tmp;

	list_for_each_entry_safe(va_block, tmp, va_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/**
 * print_va_list_locked() - print virtual addresses list.
 * @hdev: habanalabs device structure.
 * @va_list: list of virtual addresses to print.
 *
 * This function does the following:
 * - Iterate over the list and print each virtual addresses block.
 *
 * This function should be called only when va_list lock is taken.
 */
static void print_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
#if HL_MMU_DEBUG
	struct hl_vm_va_block *va_block;

	dev_dbg(hdev->dev, "print va list:\n");

	list_for_each_entry(va_block, va_list, node)
		dev_dbg(hdev->dev,
			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
			va_block->start, va_block->end, va_block->size);
#endif
}

/**
 * merge_va_blocks_locked() - merge a virtual block if possible.
 * @hdev: pointer to the habanalabs device structure.
 * @va_list: pointer to the virtual addresses block list.
 * @va_block: virtual block to merge with adjacent blocks.
 *
 * This function does the following:
 * - Merge the given block with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range.
 *
 * This function should be called only when va_list lock is taken.
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
{
	struct hl_vm_va_block *prev, *next;

	prev = list_prev_entry(va_block, node);
	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
		prev->end = va_block->end;
		prev->size = prev->end - prev->start;
		list_del(&va_block->node);
		kfree(va_block);
		va_block = prev;
	}

	next = list_next_entry(va_block, node);
	if (&next->node != va_list && va_block->end + 1 == next->start) {
		next->start = va_block->start;
		next->size = next->end - next->start;
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/**
 * add_va_block_locked() - add a virtual block to the virtual addresses list.
 * @hdev: pointer to the habanalabs device structure.
 * @va_list: pointer to the virtual addresses block list.
 * @start: start virtual address.
 * @end: end virtual address.
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other blocks
 *   if a contiguous virtual block can be created.
 *
 * This function should be called only when va_list lock is taken.
 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
{
	struct hl_vm_va_block *va_block, *res = NULL;
	u64 size = end - start + 1;

	print_va_list_locked(hdev, va_list);

	list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove upon maturity */
		if (hl_mem_area_crosses_range(start, size, va_block->start,
				va_block->end)) {
			dev_err(hdev->dev,
				"block crossing ranges at start 0x%llx, end 0x%llx\n",
				va_block->start, va_block->end);
			return -EINVAL;
		}

		if (va_block->end < start)
			res = va_block;
	}

	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
	if (!va_block)
		return -ENOMEM;

	va_block->start = start;
	va_block->end = end;
	va_block->size = size;

	if (!res)
		list_add(&va_block->node, va_list);
	else
		list_add(&va_block->node, &res->node);

	merge_va_blocks_locked(hdev, va_list, va_block);

	print_va_list_locked(hdev, va_list);

	return 0;
}

/**
 * add_va_block() - wrapper for add_va_block_locked.
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range object.
 * @start: start virtual address.
 * @end: end virtual address.
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked.
 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	mutex_lock(&va_range->lock);
	rc = add_va_block_locked(hdev, &va_range->list, start, end);
	mutex_unlock(&va_range->lock);

	return rc;
}

/**
 * is_hint_crossing_range() - check if a hint address crosses the specified
 *                            reserved range.
 * @range_type: virtual space range type.
 * @start_addr: start virtual address.
 * @size: block size.
 * @prop: asic properties structure to retrieve reserved ranges from.
 */
static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
		u64 start_addr, u32 size, struct asic_fixed_properties *prop)
{
	bool range_cross;

	if (range_type == HL_VA_RANGE_TYPE_DRAM)
		range_cross =
			hl_mem_area_crosses_range(start_addr, size,
			prop->hints_dram_reserved_va_range.start_addr,
			prop->hints_dram_reserved_va_range.end_addr);
	else if (range_type == HL_VA_RANGE_TYPE_HOST)
		range_cross =
			hl_mem_area_crosses_range(start_addr, size,
			prop->hints_host_reserved_va_range.start_addr,
			prop->hints_host_reserved_va_range.end_addr);
	else
		range_cross =
			hl_mem_area_crosses_range(start_addr, size,
			prop->hints_host_hpage_reserved_va_range.start_addr,
			prop->hints_host_hpage_reserved_va_range.end_addr);

	return range_cross;
}

/**
 * get_va_block() - get a virtual block for the given size and alignment.
 *
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range.
 * @size: requested block size.
 * @hint_addr: hint for requested address by the user.
 * @va_block_align: required alignment of the virtual block start address.
 * @range_type: va range type (host, dram)
 * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size, hint address and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
static u64 get_va_block(struct hl_device *hdev,
				struct hl_va_range *va_range,
				u64 size, u64 hint_addr, u32 va_block_align,
				enum hl_va_range_type range_type,
				u32 flags)
{
	struct hl_vm_va_block *va_block, *new_va_block = NULL;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
		align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
		dram_hint_mask = prop->dram_hints_align_mask;
	bool add_prev = false;
	bool is_align_pow_2 = is_power_of_2(va_range->page_size);
	bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
	bool force_hint = flags & HL_MEM_FORCE_HINT;

	if (is_align_pow_2)
		align_mask = ~((u64)va_block_align - 1);
	else
		/*
		 * with non-power-of-2 range we work only with page granularity
		 * and the start address is page aligned,
		 * so no need for alignment checking.
		 */
		size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
							va_range->page_size;

	tmp_hint_addr = hint_addr & ~dram_hint_mask;

	/* Check if we need to ignore hint address */
	if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
			(!is_align_pow_2 && is_hint_dram_addr &&
			do_div(tmp_hint_addr, va_range->page_size))) {

		if (force_hint) {
			/* Hint must be respected, so here we just fail */
			dev_err(hdev->dev,
				"Hint address 0x%llx is not page aligned - cannot be respected\n",
				hint_addr);
			return 0;
		}

		dev_dbg(hdev->dev,
			"Hint address 0x%llx will be ignored because it is not aligned\n",
			hint_addr);
		hint_addr = 0;
	}

	mutex_lock(&va_range->lock);

	print_va_list_locked(hdev, &va_range->list);

	list_for_each_entry(va_block, &va_range->list, node) {
		/* Calc the first possible aligned addr */
		valid_start = va_block->start;

		if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
			valid_start &= align_mask;
			valid_start += va_block_align;
			if (valid_start > va_block->end)
				continue;
		}

		valid_size = va_block->end - valid_start + 1;
		if (valid_size < size)
			continue;

		/*
		 * In case hint address is 0, and hints_range_reservation
		 * property enabled, then avoid allocating va blocks from the
		 * range reserved for hint addresses
		 */
		if (prop->hints_range_reservation && !hint_addr)
			if (is_hint_crossing_range(range_type, valid_start,
					size, prop))
				continue;

		/* Pick the minimal length block which has the required size */
		if (!new_va_block || (valid_size < reserved_valid_size)) {
			new_va_block = va_block;
			reserved_valid_start = valid_start;
			reserved_valid_size = valid_size;
		}

		if (hint_addr && hint_addr >= valid_start &&
					(hint_addr + size) <= va_block->end) {
			new_va_block = va_block;
			reserved_valid_start = hint_addr;
			reserved_valid_size = valid_size;
			break;
		}
	}

	if (!new_va_block) {
		dev_err(hdev->dev, "no available va block for size %llu\n",
								size);
		goto out;
	}

	if (force_hint && reserved_valid_start != hint_addr) {
		/* Hint address must be respected. If we are here - this means
		 * we could not respect it.
		 */
		dev_err(hdev->dev,
			"Hint address 0x%llx could not be respected\n",
			hint_addr);
		reserved_valid_start = 0;
		goto out;
	}

	/*
	 * Check if there is some leftover range due to reserving the new
	 * va block, then return it to the main virtual addresses list.
	 */
	if (reserved_valid_start > new_va_block->start) {
		prev_start = new_va_block->start;
		prev_end = reserved_valid_start - 1;

		new_va_block->start = reserved_valid_start;
		new_va_block->size = reserved_valid_size;

		add_prev = true;
	}

	if (new_va_block->size > size) {
		new_va_block->start += size;
		new_va_block->size = new_va_block->end - new_va_block->start + 1;
	} else {
		list_del(&new_va_block->node);
		kfree(new_va_block);
	}

	if (add_prev)
		add_va_block_locked(hdev, &va_range->list, prev_start,
				prev_end);

	print_va_list_locked(hdev, &va_range->list);
out:
	mutex_unlock(&va_range->lock);

	return reserved_valid_start;
}

/**
 * hl_reserve_va_block() - reserve a virtual block of a given size.
 * @hdev: pointer to the habanalabs device structure.
 * @ctx: current context
 * @type: virtual addresses range type.
 * @size: requested block size.
 * @alignment: required alignment in bytes of the virtual block start address,
 *             0 means no alignment.
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		enum hl_va_range_type type, u32 size, u32 alignment)
{
	return get_va_block(hdev, ctx->va_range[type], size, 0,
			max(alignment, ctx->va_range[type]->page_size),
			type, 0);
}

/**
 * hl_get_va_range_type() - get va_range type for the given address and size.
 * @ctx: context to fetch va_range from.
 * @address: the start address of the area we want to validate.
 * @size: the size in bytes of the area we want to validate.
 * @type: returned va_range type.
 *
 * Return: 0 if the area is inside a valid va_range, -EINVAL otherwise.
 */
static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
			enum hl_va_range_type *type)
{
	int i;

	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
		if (hl_mem_area_inside_range(address, size,
				ctx->va_range[i]->start_addr,
				ctx->va_range[i]->end_addr)) {
			*type = i;
			return 0;
		}
	}

	return -EINVAL;
}

/**
 * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
 * @hdev: pointer to the habanalabs device structure
 * @ctx: pointer to the context structure.
 * @start_addr: start virtual address.
 * @size: number of bytes to unreserve.
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked.
 */
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		u64 start_addr, u64 size)
{
	enum hl_va_range_type type;
	int rc;

	rc = hl_get_va_range_type(ctx, start_addr, size, &type);
	if (rc) {
		dev_err(hdev->dev,
			"cannot find va_range for va %#llx size %llu",
			start_addr, size);
		return rc;
	}

	rc = add_va_block(hdev, ctx->va_range[type], start_addr,
			start_addr + size - 1);
	if (rc)
		dev_warn(hdev->dev,
			"add va block failed for vaddr: 0x%llx\n", start_addr);

	return rc;
}

/**
 * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
 *                                    memory
 * @ctx: pointer to the context structure.
 * @userptr: userptr to initialize from.
 * @pphys_pg_pack: result pointer.
 * @force_regular_page: tell the function to ignore huge page optimization,
 *                      even if possible. Needed for cases where the device VA
 *                      is allocated before we know the composition of the
 *                      physical pages
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block.
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block.
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
				struct hl_userptr *userptr,
				struct hl_vm_phys_pg_pack **pphys_pg_pack,
				bool force_regular_page)
{
	u32 npages, page_size = PAGE_SIZE,
		huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
	u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	bool first = true, is_huge_page_opt;
	u64 page_mask, total_npages;
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	int rc, i, j;

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack)
		return -ENOMEM;

	phys_pg_pack->vm_type = userptr->vm_type;
	phys_pg_pack->created_from_userptr = true;
	phys_pg_pack->asid = ctx->asid;
	atomic_set(&phys_pg_pack->mapping_cnt, 1);

	is_huge_page_opt = !force_regular_page;

	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * consecutive block.
	 */
	total_npages = 0;
	for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
		npages = hl_get_sg_info(sg, &dma_addr);

		total_npages += npages;

		if ((npages % pgs_in_huge_page) ||
				(dma_addr & (huge_page_size - 1)))
			is_huge_page_opt = false;
	}

	if (is_huge_page_opt) {
		page_size = huge_page_size;
		do_div(total_npages, pgs_in_huge_page);
	}

	page_mask = ~(((u64) page_size) - 1);

	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
						GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto page_pack_arr_mem_err;
	}

	phys_pg_pack->npages = total_npages;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_npages * page_size;

	j = 0;
	for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
		npages = hl_get_sg_info(sg, &dma_addr);

		/* align down to physical page size and save the offset */
		if (first) {
			first = false;
			phys_pg_pack->offset = dma_addr & (page_size - 1);
			dma_addr &= page_mask;
		}

		while (npages) {
			phys_pg_pack->pages[j++] = dma_addr;
			dma_addr += page_size;

			if (is_huge_page_opt)
				npages -= pgs_in_huge_page;
			else
				npages--;
		}
	}

	*pphys_pg_pack = phys_pg_pack;

	return 0;

page_pack_arr_mem_err:
	kfree(phys_pg_pack);

	return rc;
}

/**
 * map_phys_pg_pack() - maps the physical page pack.
 * @ctx: pointer to the context structure.
 * @vaddr: start address of the virtual area to map from.
 * @phys_pg_pack: the pack of physical pages to map to.
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk.
 * - Stores number of successful mappings in the given argument.
 * - Returns 0 on success, error code otherwise.
 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
	u32 page_size = phys_pg_pack->page_size;
	int rc = 0;
	bool is_host_addr;

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
		paddr = phys_pg_pack->pages[i];

		rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
				(i + 1) == phys_pg_pack->npages);
		if (rc) {
			dev_err(hdev->dev,
				"map failed for handle %u, npages: %llu, mapped: %llu",
				phys_pg_pack->handle, phys_pg_pack->npages,
				mapped_pg_cnt);
			goto err;
		}

		mapped_pg_cnt++;
		next_vaddr += page_size;
	}

	return 0;

err:
	is_host_addr = !hl_is_dram_va(hdev, vaddr);

	next_vaddr = vaddr;
	for (i = 0 ; i < mapped_pg_cnt ; i++) {
		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
					(i + 1) == mapped_pg_cnt))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
					phys_pg_pack->handle, next_vaddr,
					phys_pg_pack->pages[i], page_size);

		next_vaddr += page_size;

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 *
		 * In addition, the number of host pages can be huge because
		 * the page size may be 4KB, so when unmapping host pages
		 * sleep every 32K pages to avoid a soft lockup
		 */
		if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
			usleep_range(50, 200);
	}

	return rc;
}

/**
 * unmap_phys_pg_pack() - unmaps the physical page pack.
 * @ctx: pointer to the context structure.
 * @vaddr: start address of the virtual area to unmap.
 * @phys_pg_pack: the pack of physical pages to unmap.
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr, i;
	bool is_host_addr;
	u32 page_size;

	is_host_addr = !hl_is_dram_va(hdev, vaddr);
	page_size = phys_pg_pack->page_size;
	next_vaddr = vaddr;

	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
				(i + 1) == phys_pg_pack->npages))
			dev_warn_ratelimited(hdev->dev,
				"unmap failed for vaddr: 0x%llx\n", next_vaddr);

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 *
		 * In addition, the number of host pages can be huge because
		 * the page size may be 4KB, so when unmapping host pages
		 * sleep every 32K pages to avoid a soft lockup
		 */
		if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
			usleep_range(50, 200);
	}
}

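/**
 * get_paddr_from_handle() - get the physical address of an allocation.
 * @ctx: pointer to the context structure.
 * @args: host parameters containing the device memory handle.
 * @paddr: resulting physical address of the first page of the allocation.
 *
 * Look up the physical page pack of the given handle and return the physical
 * address of its first page.
 */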
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
					u64 *paddr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle;

	handle = lower_32_bits(args->map_device.handle);
	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (!phys_pg_pack) {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "no match for handle %u\n", handle);
		return -EINVAL;
	}

	*paddr = phys_pg_pack->pages[0];

	spin_unlock(&vm->idr_lock);

	return 0;
}

/**
 * map_device_va() - map the given memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters with handle/host virtual address.
 * @device_addr: pointer to result device virtual address.
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block.
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block.
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	enum hl_va_range_type va_range_type = 0;
	struct hl_device *hdev = ctx->hdev;
	struct hl_userptr *userptr = NULL;
	u32 handle = 0, va_block_align;
	struct hl_vm_hash_node *hnode;
	struct hl_vm *vm = &hdev->vm;
	struct hl_va_range *va_range;
	bool is_userptr, do_prefetch;
	u64 ret_vaddr, hint_addr;
	enum vm_type *vm_type;
	int rc;

	/* set map flags */
	is_userptr = args->flags & HL_MEM_USERPTR;
	do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);

	/* Assume failure */
	*device_addr = 0;

	if (is_userptr) {
		u64 addr = args->map_host.host_virt_addr,
			size = args->map_host.mem_size;
		u32 page_size = hdev->asic_prop.pmmu.page_size,
			huge_page_size = hdev->asic_prop.pmmu_huge.page_size;

		rc = dma_map_host_va(hdev, addr, size, &userptr);
		if (rc) {
			dev_err(hdev->dev, "failed to get userptr from va\n");
			return rc;
		}

		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				&phys_pg_pack, false);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				addr);
			goto init_page_pack_err;
		}

		vm_type = (enum vm_type *) userptr;
		hint_addr = args->map_host.hint_addr;
		handle = phys_pg_pack->handle;

		/* get required alignment */
		if (phys_pg_pack->page_size == page_size) {
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
			va_range_type = HL_VA_RANGE_TYPE_HOST;
			/*
			 * huge page alignment may be needed in case of regular
			 * page mapping, depending on the host VA alignment
			 */
			if (addr & (huge_page_size - 1))
				va_block_align = page_size;
			else
				va_block_align = huge_page_size;
		} else {
			/*
			 * huge page alignment is needed in case of huge page
			 * mapping
			 */
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
			va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
			va_block_align = huge_page_size;
		}
	} else {
		handle = lower_32_bits(args->map_device.handle);

		spin_lock(&vm->idr_lock);
		phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
		if (!phys_pg_pack) {
			spin_unlock(&vm->idr_lock);
			dev_err(hdev->dev,
				"no match for handle %u\n", handle);
			return -EINVAL;
		}

		/* increment now to avoid freeing device memory while mapping */
		atomic_inc(&phys_pg_pack->mapping_cnt);

		spin_unlock(&vm->idr_lock);

		vm_type = (enum vm_type *) phys_pg_pack;

		hint_addr = args->map_device.hint_addr;

		/* DRAM VA alignment is the same as the MMU page size */
		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
		va_range_type = HL_VA_RANGE_TYPE_DRAM;
		va_block_align = hdev->asic_prop.dmmu.page_size;
	}

	/*
	 * relevant for mapping device physical memory only, as host memory is
	 * implicitly shared
	 */
	if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
			phys_pg_pack->asid != ctx->asid) {
		dev_err(hdev->dev,
			"Failed to map memory, handle %u is not shared\n",
			handle);
		rc = -EPERM;
		goto shared_err;
	}

	hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
	if (!hnode) {
		rc = -ENOMEM;
		goto hnode_err;
	}

	if (hint_addr && phys_pg_pack->offset) {
		if (args->flags & HL_MEM_FORCE_HINT) {
			/* Fail if hint must be respected but it can't be */
			dev_err(hdev->dev,
				"Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
				hint_addr, phys_pg_pack->offset);
			rc = -EINVAL;
			goto va_block_err;
		}
		dev_dbg(hdev->dev,
			"Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
			hint_addr, phys_pg_pack->offset);
	}

	ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
					hint_addr, va_block_align,
					va_range_type, args->flags);
	if (!ret_vaddr) {
		dev_err(hdev->dev, "no available va block for handle %u\n",
				handle);
		rc = -ENOMEM;
		goto va_block_err;
	}

	mutex_lock(&ctx->mmu_lock);

	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
	if (rc) {
		dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
		mutex_unlock(&ctx->mmu_lock);
		goto map_err;
	}

	rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
				ctx->asid, ret_vaddr, phys_pg_pack->total_size);
	mutex_unlock(&ctx->mmu_lock);
	if (rc)
		goto map_err;

	/*
	 * Prefetch is done upon user's request. It is performed in a WQ and so
	 * can be done outside the MMU lock. The operation itself is already
	 * protected by the MMU lock.
	 */
	if (do_prefetch) {
		rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr,
						phys_pg_pack->total_size);
		if (rc)
			goto map_err;
	}

	ret_vaddr += phys_pg_pack->offset;

	hnode->ptr = vm_type;
	hnode->vaddr = ret_vaddr;

	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	*device_addr = ret_vaddr;

	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);

	return rc;

map_err:
	if (add_va_block(hdev, va_range, ret_vaddr,
				ret_vaddr + phys_pg_pack->total_size - 1))
		dev_warn(hdev->dev,
			"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
			handle, ret_vaddr);

va_block_err:
	kfree(hnode);
hnode_err:
shared_err:
	atomic_dec(&phys_pg_pack->mapping_cnt);
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
	if (is_userptr)
		dma_unmap_host_va(hdev, userptr);

	return rc;
}


/**
 * unmap_device_va() - unmap the given device virtual address.
 * @ctx: pointer to the context structure.
 * @args: host parameters with device virtual address to unmap.
 * @ctx_free: true if in context free flow, false otherwise.
 *
 * This function does the following:
 * - unmap the physical pages related to the given virtual address.
 * - return the device virtual block to the virtual block list.
 */
static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
				bool ctx_free)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	u64 vaddr = args->unmap.device_virt_addr;
	struct hl_vm_hash_node *hnode = NULL;
	struct asic_fixed_properties *prop;
	struct hl_device *hdev = ctx->hdev;
	struct hl_userptr *userptr = NULL;
	struct hl_va_range *va_range;
	enum vm_type *vm_type;
	bool is_userptr;
	int rc = 0;

	prop = &hdev->asic_prop;

	/* protect from double entrance */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
		if (vaddr == hnode->vaddr)
			break;

	if (!hnode) {
		mutex_unlock(&ctx->mem_hash_lock);
		dev_err(hdev->dev,
			"unmap failed, no mem hnode for vaddr 0x%llx\n",
			vaddr);
		return -EINVAL;
	}

	hash_del(&hnode->node);
	mutex_unlock(&ctx->mem_hash_lock);

	vm_type = hnode->ptr;

	if (*vm_type == VM_TYPE_USERPTR) {
		is_userptr = true;
		userptr = hnode->ptr;

		rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
							false);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				vaddr);
			goto vm_type_err;
		}

		if (phys_pg_pack->page_size ==
					hdev->asic_prop.pmmu.page_size)
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
		else
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
		is_userptr = false;
		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
		phys_pg_pack = hnode->ptr;
	} else {
		dev_warn(hdev->dev,
			"unmap failed, unknown vm desc for vaddr 0x%llx\n",
				vaddr);
		rc = -EFAULT;
		goto vm_type_err;
	}

	if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
		dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
		rc = -EINVAL;
		goto mapping_cnt_err;
	}
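
	/*
	 * Illustrative note: the address stored in the hash includes the
	 * allocation's in-page offset, so it is rounded back down to the page
	 * start before unmapping. For a power-of-2 page size this is a simple
	 * mask; for a non-power-of-2 DRAM page size (e.g. 0x3000000 == 48MB),
	 * an offset of 0x5000000 from dram_base_address rounds down to
	 * 0x3000000, i.e. the start of the second page.
	 */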
	if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
		vaddr = prop->dram_base_address +
			DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
						phys_pg_pack->page_size) *
							phys_pg_pack->page_size;
	else
		vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);

	mutex_lock(&ctx->mmu_lock);

	unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);

	/*
	 * During context free this function is called in a loop to clean all
	 * the context mappings. Hence the cache invalidation can be called once
	 * at the loop end rather than for each iteration
	 */
	if (!ctx_free)
		rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
							phys_pg_pack->total_size);

	mutex_unlock(&ctx->mmu_lock);

	/*
	 * If the context is closing we don't need to check for the MMU cache
	 * invalidation return code and update the VA free list as in this flow
	 * we invalidate the MMU cache outside of this unmap function and the VA
	 * free list will be freed anyway.
	 */
	if (!ctx_free) {
		int tmp_rc;

		tmp_rc = add_va_block(hdev, va_range, vaddr,
					vaddr + phys_pg_pack->total_size - 1);
		if (tmp_rc) {
			dev_warn(hdev->dev,
					"add va block failed for vaddr: 0x%llx\n",
					vaddr);
			if (!rc)
				rc = tmp_rc;
		}
	}

	atomic_dec(&phys_pg_pack->mapping_cnt);
	kfree(hnode);

	if (is_userptr) {
		free_phys_pg_pack(hdev, phys_pg_pack);
		dma_unmap_host_va(hdev, userptr);
	}

	return rc;

mapping_cnt_err:
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	return rc;
}

static int map_block(struct hl_device *hdev, u64 address, u64 *handle, u32 *size)
{
	u32 block_id;
	int rc;

	*handle = 0;
	if (size)
		*size = 0;

	rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
	if (rc)
		return rc;

	*handle = block_id | HL_MMAP_TYPE_BLOCK;
	*handle <<= PAGE_SHIFT;

	return 0;
}
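
/*
 * Illustrative note (not part of the driver flow): the handle returned by
 * map_block() encodes the block id together with HL_MMAP_TYPE_BLOCK and is
 * already shifted left by PAGE_SHIFT, so user space is expected to pass it
 * as the mmap() offset as-is, e.g.:
 *
 *	ptr = mmap(NULL, block_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, (off_t) handle);
 *
 * The mmap dispatcher presumably strips the HL_MMAP_TYPE_* bits, so
 * hl_hw_block_mmap() below sees the plain block id in vma->vm_pgoff.
 */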

static void hw_block_vm_close(struct vm_area_struct *vma)
{
	struct hl_vm_hw_block_list_node *lnode =
		(struct hl_vm_hw_block_list_node *) vma->vm_private_data;
	struct hl_ctx *ctx = lnode->ctx;
	long new_mmap_size;

	new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
	if (new_mmap_size > 0) {
		lnode->mapped_size = new_mmap_size;
		return;
	}

	mutex_lock(&ctx->hw_block_list_lock);
	list_del(&lnode->node);
	mutex_unlock(&ctx->hw_block_list_lock);
	hl_ctx_put(ctx);
	kfree(lnode);
	vma->vm_private_data = NULL;
}
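
/*
 * Note: the close callback above is invoked once per munmap()ed range, so a
 * partial unmap only shrinks mapped_size; the list node and the context
 * reference are released only when the entire HW block mapping is gone.
 * Only .close is provided below since the pages are presumably mapped up
 * front by the ASIC-specific hw_block_mmap() callback, so no fault handler
 * is needed.
 */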

static const struct vm_operations_struct hw_block_vm_ops = {
	.close = hw_block_vm_close
};

/**
 * hl_hw_block_mmap() - mmap a hw block to user.
 * @hpriv: pointer to the private data of the fd
 * @vma: pointer to vm_area_struct of the process
 *
 * Driver increments context reference for every HW block mapped in order
 * to prevent user from closing FD without unmapping first
 */
int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_vm_hw_block_list_node *lnode;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u32 block_id, block_size;
	int rc;

	/* We use the page offset to hold the block id and thus we need to clear
	 * it before doing the mmap itself
	 */
	block_id = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* Driver only allows mapping of a complete HW block */
	block_size = vma->vm_end - vma->vm_start;

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		return -EINVAL;
	}

	lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
	if (!lnode)
		return -ENOMEM;

	rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
	if (rc) {
		kfree(lnode);
		return rc;
	}

	hl_ctx_get(ctx);

	lnode->ctx = ctx;
	lnode->vaddr = vma->vm_start;
	lnode->block_size = block_size;
	lnode->mapped_size = lnode->block_size;
	lnode->id = block_id;

	vma->vm_private_data = lnode;
	vma->vm_ops = &hw_block_vm_ops;

	mutex_lock(&ctx->hw_block_list_lock);
	list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
	mutex_unlock(&ctx->hw_block_list_lock);

	vma->vm_pgoff = block_id;

	return 0;
}
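
/*
 * The functions below implement exporting device (DRAM) memory as a dma-buf
 * object for PCI peer-to-peer access: the exported scatter-gather table is
 * built from PCI BAR addresses that correspond to the device pages, so an
 * importer can DMA directly to/from the device memory without any CPU copy.
 */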

static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
			struct device *dev, enum dma_data_direction dir)
{
	dma_addr_t addr;
	int rc;

	addr = dma_map_resource(dev, bar_address, chunk_size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	rc = dma_mapping_error(dev, addr);
	if (rc)
		return rc;

	sg_set_page(sg, NULL, chunk_size, 0);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = chunk_size;

	return 0;
}
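
/*
 * Note: sg_set_page() is called with a NULL page because the entries describe
 * PCI BAR (MMIO) ranges rather than struct pages; only the DMA address and
 * length of each entry are meaningful, which is also why orig_nents is zeroed
 * once the table is fully built below.
 */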

static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
						u64 page_size, struct device *dev,
						enum dma_data_direction dir)
{
	u64 chunk_size, bar_address, dma_max_seg_size;
	struct asic_fixed_properties *prop;
	int rc, i, j, nents, cur_page;
	struct scatterlist *sg;
	struct sg_table *sgt;

	prop = &hdev->asic_prop;

	dma_max_seg_size = dma_get_max_seg_size(dev);

	/* We would like to align the max segment size to PAGE_SIZE, so the
	 * SGL will contain aligned addresses that can be easily mapped to
	 * an MMU
	 */
	dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
	if (dma_max_seg_size < PAGE_SIZE) {
		dev_err_ratelimited(hdev->dev,
				"dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
				dma_max_seg_size);
		return ERR_PTR(-EINVAL);
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/* If the size of each page is larger than the dma max segment size,
	 * then we can't combine pages and the number of entries in the SGL
	 * will just be the
	 * <number of pages> * <chunks of max segment size in each page>
	 */
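	/*
	 * Illustrative example: with a 32MB device page and a 4MB DMA max
	 * segment size, each page contributes DIV_ROUND_UP(32MB, 4MB) = 8
	 * entries, so 4 pages yield 32 entries. Conversely, with 2MB device
	 * pages and a 4MB segment limit, two physically contiguous pages can
	 * be merged into a single 4MB entry by the else branch below.
	 */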
	if (page_size > dma_max_seg_size)
		nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
	else
		/* Get number of non-contiguous chunks */
		for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
			if (pages[i - 1] + page_size != pages[i] ||
					chunk_size + page_size > dma_max_seg_size) {
				nents++;
				chunk_size = page_size;
				continue;
			}

			chunk_size += page_size;
		}

	rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
	if (rc)
		goto error_free;

	cur_page = 0;

	if (page_size > dma_max_seg_size) {
		u64 size_left, cur_device_address = 0;

		size_left = page_size;

		/* Need to split each page into the number of chunks of
		 * dma_max_seg_size
		 */
		for_each_sgtable_dma_sg(sgt, sg, i) {
			if (size_left == page_size)
				cur_device_address =
					pages[cur_page] - prop->dram_base_address;
			else
				cur_device_address += dma_max_seg_size;

			chunk_size = min(size_left, dma_max_seg_size);

			bar_address = hdev->dram_pci_bar_start + cur_device_address;

			rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
			if (rc)
				goto error_unmap;

			if (size_left > dma_max_seg_size) {
				size_left -= dma_max_seg_size;
			} else {
				cur_page++;
				size_left = page_size;
			}
		}
	} else {
		/* Merge pages and put them into the scatterlist */
		for_each_sgtable_dma_sg(sgt, sg, i) {
			chunk_size = page_size;
			for (j = cur_page + 1 ; j < npages ; j++) {
				if (pages[j - 1] + page_size != pages[j] ||
						chunk_size + page_size > dma_max_seg_size)
					break;

				chunk_size += page_size;
			}

			bar_address = hdev->dram_pci_bar_start +
					(pages[cur_page] - prop->dram_base_address);

			rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
			if (rc)
				goto error_unmap;

			cur_page = j;
		}
	}

	/* Because we are not going to include a CPU list we want to have some
	 * chance that other users will detect this by setting the orig_nents
	 * to 0 and using only nents (length of DMA list) when going over the
	 * sgl
	 */
	sgt->orig_nents = 0;

	return sgt;

error_unmap:
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (!sg_dma_len(sg))
			continue;

		dma_unmap_resource(dev, sg_dma_address(sg),
					sg_dma_len(sg), dir,
					DMA_ATTR_SKIP_CPU_SYNC);
	}

	sg_free_table(sgt);

error_free:
	kfree(sgt);
	return ERR_PTR(rc);
}

static int hl_dmabuf_attach(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	struct hl_dmabuf_priv *hl_dmabuf;
	struct hl_device *hdev;
	int rc;

	hl_dmabuf = dmabuf->priv;
	hdev = hl_dmabuf->ctx->hdev;

	rc = pci_p2pdma_distance_many(hdev->pdev, &attachment->dev, 1, true);

	if (rc < 0)
		attachment->peer2peer = false;
	return 0;
}

static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
					enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attachment->dmabuf;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_dmabuf_priv *hl_dmabuf;
	struct hl_device *hdev;
	struct sg_table *sgt;

	hl_dmabuf = dma_buf->priv;
	hdev = hl_dmabuf->ctx->hdev;
	phys_pg_pack = hl_dmabuf->phys_pg_pack;

	if (!attachment->peer2peer) {
		dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
		return ERR_PTR(-EPERM);
	}

	if (phys_pg_pack)
		sgt = alloc_sgt_from_device_pages(hdev,
						phys_pg_pack->pages,
						phys_pg_pack->npages,
						phys_pg_pack->page_size,
						attachment->dev,
						dir);
	else
		sgt = alloc_sgt_from_device_pages(hdev,
						&hl_dmabuf->device_address,
						1,
						hl_dmabuf->dmabuf->size,
						attachment->dev,
						dir);

	if (IS_ERR(sgt))
		dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));

	return sgt;
}

static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
	 * only in the 'device' domain (after all, it maps a PCI bar address which points to the
	 * device memory).
	 *
	 * Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
	 * a sync of the memory to the CPU's cache, as it never resided inside that cache.
	 */
	for_each_sgtable_dma_sg(sgt, sg, i)
		dma_unmap_resource(attachment->dev, sg_dma_address(sg),
					sg_dma_len(sg), dir,
					DMA_ATTR_SKIP_CPU_SYNC);

	/* Need to restore orig_nents because sg_free_table uses that field */
	sgt->orig_nents = sgt->nents;
	sg_free_table(sgt);
	kfree(sgt);
}

static void hl_release_dmabuf(struct dma_buf *dmabuf)
{
	struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
	struct hl_ctx *ctx = hl_dmabuf->ctx;
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;

	if (hl_dmabuf->phys_pg_pack) {
		spin_lock(&vm->idr_lock);
		hl_dmabuf->phys_pg_pack->exporting_cnt--;
		spin_unlock(&vm->idr_lock);
	}

	hl_ctx_put(hl_dmabuf->ctx);

	kfree(hl_dmabuf);
}

static const struct dma_buf_ops habanalabs_dmabuf_ops = {
	.attach = hl_dmabuf_attach,
	.map_dma_buf = hl_map_dmabuf,
	.unmap_dma_buf = hl_unmap_dmabuf,
	.release = hl_release_dmabuf,
};
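
/*
 * Note: no mmap/vmap or CPU-access callbacks are provided in the ops above;
 * the exported memory lives in device DRAM behind a PCI BAR and is intended
 * to be reached only through peer-to-peer DMA by the importing device.
 */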

static int export_dmabuf_common(struct hl_ctx *ctx,
				struct hl_dmabuf_priv *hl_dmabuf,
				u64 total_size, int flags, int *dmabuf_fd)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct hl_device *hdev = ctx->hdev;
	int rc, fd;

	exp_info.ops = &habanalabs_dmabuf_ops;
	exp_info.size = total_size;
	exp_info.flags = flags;
	exp_info.priv = hl_dmabuf;

	hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(hl_dmabuf->dmabuf)) {
		dev_err(hdev->dev, "failed to export dma-buf\n");
		return PTR_ERR(hl_dmabuf->dmabuf);
	}

	fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
	if (fd < 0) {
		dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n");
		rc = fd;
		goto err_dma_buf_put;
	}

	hl_dmabuf->ctx = ctx;
	hl_ctx_get(hl_dmabuf->ctx);

	*dmabuf_fd = fd;

	return 0;

err_dma_buf_put:
	dma_buf_put(hl_dmabuf->dmabuf);
	return rc;
}

/**
 * export_dmabuf_from_addr() - export a dma-buf object for the given memory
 *                             address and size.
 * @ctx: pointer to the context structure.
 * @device_addr: device memory physical address.
 * @size: size of device memory.
 * @flags: DMA-BUF file/FD flags.
 * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
 *
 * Create and export a dma-buf object for an existing memory allocation inside
 * the device memory, and return a FD which is associated with the dma-buf
 * object.
 *
 * Return: 0 on success, non-zero for failure.
 */
static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
					u64 size, int flags, int *dmabuf_fd)
{
	struct hl_dmabuf_priv *hl_dmabuf;
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop;
	u64 bar_address;
	int rc;

	prop = &hdev->asic_prop;

	if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
		dev_dbg(hdev->dev,
			"exported device memory address 0x%llx should be aligned to 0x%lx\n",
			device_addr, PAGE_SIZE);
		return -EINVAL;
	}

	if (size < PAGE_SIZE) {
		dev_dbg(hdev->dev,
			"exported device memory size %llu should be equal to or greater than %lu\n",
			size, PAGE_SIZE);
		return -EINVAL;
	}

	if (device_addr < prop->dram_user_base_address ||
			device_addr + size > prop->dram_end_address ||
			device_addr + size < device_addr) {
		dev_dbg(hdev->dev,
			"DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
			device_addr, size);
		return -EINVAL;
	}

	bar_address = hdev->dram_pci_bar_start +
			(device_addr - prop->dram_base_address);

	if (bar_address + size >
			hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
			bar_address + size < bar_address) {
		dev_dbg(hdev->dev,
			"DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
			device_addr, size);
		return -EINVAL;
	}

	hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
	if (!hl_dmabuf)
		return -ENOMEM;

	hl_dmabuf->device_address = device_addr;

	rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd);
	if (rc)
		goto err_free_dmabuf_wrapper;

	return 0;

err_free_dmabuf_wrapper:
	kfree(hl_dmabuf);
	return rc;
}

/**
 * export_dmabuf_from_handle() - export a dma-buf object for the given memory
 *                               handle.
 * @ctx: pointer to the context structure.
 * @handle: device memory allocation handle.
 * @flags: DMA-BUF file/FD flags.
 * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
 *
 * Create and export a dma-buf object for an existing memory allocation inside
 * the device memory, and return a FD which is associated with the dma-buf
 * object.
 *
 * Return: 0 on success, non-zero for failure.
 */
static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags,
					int *dmabuf_fd)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_dmabuf_priv *hl_dmabuf;
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop;
	struct hl_vm *vm = &hdev->vm;
	u64 bar_address;
	int rc, i;

	prop = &hdev->asic_prop;

	if (upper_32_bits(handle)) {
		dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle);
		return -EINVAL;
	}

	spin_lock(&vm->idr_lock);

	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle);
	if (!phys_pg_pack) {
		spin_unlock(&vm->idr_lock);
		dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle);
		return -EINVAL;
	}

	/* increment now to avoid freeing device memory while exporting */
	phys_pg_pack->exporting_cnt++;

	spin_unlock(&vm->idr_lock);

	if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
		dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle);
		rc = -EINVAL;
		goto err_dec_exporting_cnt;
	}

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {

		bar_address = hdev->dram_pci_bar_start +
						(phys_pg_pack->pages[i] -
						prop->dram_base_address);

		if (bar_address + phys_pg_pack->page_size >
			hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
			bar_address + phys_pg_pack->page_size < bar_address) {

			dev_dbg(hdev->dev,
				"DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
					phys_pg_pack->pages[i],
					phys_pg_pack->page_size);

			rc = -EINVAL;
			goto err_dec_exporting_cnt;
		}
	}

	hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
	if (!hl_dmabuf) {
		rc = -ENOMEM;
		goto err_dec_exporting_cnt;
	}

	hl_dmabuf->phys_pg_pack = phys_pg_pack;

	rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size,
				flags, dmabuf_fd);
	if (rc)
		goto err_free_dmabuf_wrapper;

	return 0;

err_free_dmabuf_wrapper:
	kfree(hl_dmabuf);

err_dec_exporting_cnt:
	spin_lock(&vm->idr_lock);
	phys_pg_pack->exporting_cnt--;
	spin_unlock(&vm->idr_lock);

	return rc;
}
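
/*
 * Illustrative user-space sketch (hypothetical, not part of the driver):
 * exporting a DRAM allocation handle as a dma-buf FD through the memory
 * IOCTL, matching the HL_MEM_OP_EXPORT_DMABUF_FD case in hl_mem_ioctl():
 *
 *	union hl_mem_args args = {};
 *
 *	args.in.op = HL_MEM_OP_EXPORT_DMABUF_FD;
 *	args.in.export_dmabuf_fd.handle = device_mem_handle;
 *	args.in.flags = O_RDWR | O_CLOEXEC;
 *	ioctl(dev_fd, HL_IOCTL_MEMORY, &args);
 *	dmabuf_fd = args.out.fd;
 *
 * The resulting FD can then be handed to an importing driver via its own
 * dma-buf import interface.
 */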

static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u64 block_handle, device_addr = 0;
	struct hl_ctx *ctx = hpriv->ctx;
	u32 handle = 0, block_size;
	int rc;

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev, "alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}

		/* Force contiguous as there are no real MMU
		 * translations to overcome physical memory gaps
		 */
		args->in.flags |= HL_MEM_CONTIGUOUS;
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, &args->in);
		break;

	case HL_MEM_OP_MAP:
		if (args->in.flags & HL_MEM_USERPTR) {
			dev_err(hdev->dev, "Failed to map host memory when MMU is disabled\n");
			rc = -EPERM;
		} else {
			rc = get_paddr_from_handle(ctx, &args->in, &device_addr);
			memset(args, 0, sizeof(*args));
			args->out.device_virt_addr = device_addr;
		}

		break;

	case HL_MEM_OP_UNMAP:
		rc = 0;
		break;

	case HL_MEM_OP_MAP_BLOCK:
		rc = map_block(hdev, args->in.map_block.block_addr, &block_handle, &block_size);
		args->out.block_handle = block_handle;
		args->out.block_size = block_size;
		break;

	case HL_MEM_OP_EXPORT_DMABUF_FD:
		dev_err(hdev->dev, "Failed to export dma-buf object when MMU is disabled\n");
		rc = -EPERM;
		break;

	case HL_MEM_OP_TS_ALLOC:
		rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
		break;
	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -EINVAL;
		break;
	}

out:
	return rc;
}

static void ts_buff_release(struct hl_mmap_mem_buf *buf)
{
	struct hl_ts_buff *ts_buff = buf->private;

	vfree(ts_buff->kernel_buff_address);
	vfree(ts_buff->user_buff_address);
	kfree(ts_buff);
}

static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
{
	struct hl_ts_buff *ts_buff = buf->private;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
	return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
}

static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
	struct hl_ts_buff *ts_buff = NULL;
	u32 size, num_elements;
	void *p;

	num_elements = *(u32 *)args;

	ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
	if (!ts_buff)
		return -ENOMEM;

	/* Allocate the user buffer */
	size = num_elements * sizeof(u64);
	p = vmalloc_user(size);
	if (!p)
		goto free_mem;

	ts_buff->user_buff_address = p;
	buf->mappable_size = size;

	/* Allocate the internal kernel buffer */
	size = num_elements * sizeof(struct hl_user_pending_interrupt);
	p = vmalloc(size);
	if (!p)
		goto free_user_buff;

	ts_buff->kernel_buff_address = p;
	ts_buff->kernel_buff_size = size;

	buf->private = ts_buff;

	return 0;

free_user_buff:
	vfree(ts_buff->user_buff_address);
free_mem:
	kfree(ts_buff);
	return -ENOMEM;
}
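
/*
 * Illustrative sizing note: for num_elements = 1024, the user-mappable buffer
 * above is 1024 * sizeof(u64) = 8KB of timestamp slots, while the internal
 * kernel buffer holds 1024 hl_user_pending_interrupt nodes that are handed
 * out during interrupt registration instead of being allocated on demand.
 */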

static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
	.topic = "TS",
	.mem_id = HL_MMAP_TYPE_TS_BUFF,
	.mmap = hl_ts_mmap,
	.alloc = hl_ts_alloc_buf,
	.release = ts_buff_release,
};

/**
 * allocate_timestamps_buffers() - allocate timestamps buffers
 * @hpriv: pointer to the private data of the fd
 * @args: ioctl input
 * @handle: user timestamp buffer handle as an output
 *
 * This function allocates a timestamp buffer that will later be mapped to the
 * user in order to be able to read the timestamp.
 * In addition, it allocates an extra buffer for registration management.
 * Since registration must not fail on an out-of-memory situation, a pool is
 * prepared up front and used as user interrupt nodes; instead of dynamically
 * allocating nodes during registration, a node is picked from this pool.
 * In addition, a node is added to the mapping hash, which is used to map the
 * user timestamp buffer to the internal kernel timestamp buffer.
 */
static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle)
{
	struct hl_mem_mgr *mmg = &hpriv->mem_mgr;
	struct hl_mmap_mem_buf *buf;

	if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) {
		dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
				args->num_of_elements, TS_MAX_ELEMENTS_NUM);
		return -EINVAL;
	}

	buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements);
	if (!buf)
		return -ENOMEM;

	*handle = buf->handle;

	return 0;
}
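
/*
 * Illustrative user-space sketch (hypothetical, not part of the driver): the
 * handle returned from HL_MEM_OP_TS_ALLOC is treated as an opaque value and
 * passed as the mmap() offset; the unified memory manager hides the page
 * shifting behind hl_mmap_mem_buf_alloc(), so no manual shift is expected:
 *
 *	ptr = mmap(NULL, num_elements * sizeof(__u64), PROT_READ, MAP_SHARED,
 *		   dev_fd, (off_t) ts_handle);
 */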

int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
	enum hl_device_status status;
	union hl_mem_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 block_handle, device_addr = 0;
	u32 handle = 0, block_size;
	int rc, dmabuf_fd = -EBADF;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute MEMORY IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	if (!hdev->mmu_enable)
		return mem_ioctl_no_mmu(hpriv, args);

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}

		/* If DRAM does not support virtual memory the driver won't
		 * handle the allocation/freeing of that memory. However, for
		 * system administration/monitoring purposes, the driver will
		 * keep track of the amount of DRAM memory that is allocated
		 * and freed by the user. Because this code totally relies on
		 * the user's input, the driver can't ensure the validity
		 * of this accounting.
		 */
		if (!hdev->asic_prop.dram_supports_virtual_memory) {
			atomic64_add(args->in.alloc.mem_size,
					&ctx->dram_phys_mem);
			atomic64_add(args->in.alloc.mem_size,
					&hdev->dram_used_mem);

			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
			rc = 0;

			memset(args, 0, sizeof(*args));
			args->out.handle = 0;
			goto out;
		}

		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		/* If DRAM does not support virtual memory the driver won't
		 * handle the allocation/freeing of that memory. However, for
		 * system administration/monitoring purposes, the driver will
		 * keep track of the amount of DRAM memory that is allocated
		 * and freed by the user. Because this code totally relies on
		 * the user's input, the driver can't ensure the validity
		 * of this accounting.
		 */
		if (!hdev->asic_prop.dram_supports_virtual_memory) {
			atomic64_sub(args->in.alloc.mem_size,
					&ctx->dram_phys_mem);
			atomic64_sub(args->in.alloc.mem_size,
					&hdev->dram_used_mem);

			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
			rc = 0;

			goto out;
		}

		rc = free_device_memory(ctx, &args->in);
		break;

	case HL_MEM_OP_MAP:
		rc = map_device_va(ctx, &args->in, &device_addr);

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = unmap_device_va(ctx, &args->in, false);
		break;

	case HL_MEM_OP_MAP_BLOCK:
		rc = map_block(hdev, args->in.map_block.block_addr,
				&block_handle, &block_size);
		args->out.block_handle = block_handle;
		args->out.block_size = block_size;
		break;

	case HL_MEM_OP_EXPORT_DMABUF_FD:
		if (hdev->asic_prop.dram_supports_virtual_memory)
			rc = export_dmabuf_from_handle(ctx,
					args->in.export_dmabuf_fd.handle,
					args->in.flags,
					&dmabuf_fd);
		else
			rc = export_dmabuf_from_addr(ctx,
					args->in.export_dmabuf_fd.handle,
					args->in.export_dmabuf_fd.mem_size,
					args->in.flags,
					&dmabuf_fd);
		memset(args, 0, sizeof(*args));
		args->out.fd = dmabuf_fd;
		break;

	case HL_MEM_OP_TS_ALLOC:
		rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
		break;
	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -EINVAL;
		break;
	}

out:
	return rc;
}

static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
				u32 npages, u64 start, u32 offset,
				struct hl_userptr *userptr)
{
	int rc;

	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
		return -EFAULT;
	}

	userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
					GFP_KERNEL);
	if (!userptr->pages)
		return -ENOMEM;

	rc = pin_user_pages_fast(start, npages,
				 FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
				 userptr->pages);

	if (rc != npages) {
		dev_err(hdev->dev,
			"Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n",
			rc, addr, size, npages);
		if (rc < 0)
			goto destroy_pages;
		npages = rc;
		rc = -EFAULT;
		goto put_pages;
	}
	userptr->npages = npages;

	rc = sg_alloc_table_from_pages(userptr->sgt,
				       userptr->pages,
				       npages, offset, size, GFP_KERNEL);
	if (rc < 0) {
		dev_err(hdev->dev, "failed to create SG table from pages\n");
		goto put_pages;
	}

	return 0;

put_pages:
	unpin_user_pages(userptr->pages, npages);
destroy_pages:
	kvfree(userptr->pages);
	return rc;
}

/**
 * hl_pin_host_memory() - pins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure.
 * @addr: the host virtual address of the memory area.
 * @size: the size of the memory area.
 * @userptr: pointer to hl_userptr structure.
 *
 * This function does the following:
 * - Pins the physical pages.
 * - Create an SG list from those pages.
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
			struct hl_userptr *userptr)
{
	u64 start, end;
	u32 npages, offset;
	int rc;

	if (!size) {
		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
		return -EINVAL;
	}

	/*
	 * If the combination of the address and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
			PAGE_ALIGN(addr + size) < (addr + size)) {
		dev_err(hdev->dev,
			"user pointer 0x%llx + %llu causes integer overflow\n",
			addr, size);
		return -EINVAL;
	}

	userptr->pid = current->pid;
	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
	if (!userptr->sgt)
		return -ENOMEM;
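
	/*
	 * Illustrative example of the page-span computation below: for
	 * addr = 0x1003 and size = 0x2000 (with 4KB pages), start = 0x1000,
	 * offset = 0x3, end = PAGE_ALIGN(0x3003) = 0x4000, hence npages = 3;
	 * the offset is later passed to sg_alloc_table_from_pages() so the SG
	 * list still points at the exact user bytes.
	 */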
	start = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	end = PAGE_ALIGN(addr + size);
	npages = (end - start) >> PAGE_SHIFT;

	userptr->size = size;
	userptr->addr = addr;
	userptr->dma_mapped = false;
	INIT_LIST_HEAD(&userptr->job_node);

	rc = get_user_memory(hdev, addr, size, npages, start, offset,
				userptr);
	if (rc) {
		dev_err(hdev->dev,
			"failed to get user memory for address 0x%llx\n",
			addr);
		goto free_sgt;
	}

	hl_debugfs_add_userptr(hdev, userptr);

	return 0;

free_sgt:
	kfree(userptr->sgt);
	return rc;
}

/*
 * hl_unpin_host_memory - unpins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory
 * - Frees the SG list
 */
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
	hl_debugfs_remove_userptr(hdev, userptr);

	if (userptr->dma_mapped)
		hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);

	unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
	kvfree(userptr->pages);

	list_del(&userptr->job_node);

	sg_free_table(userptr->sgt);
	kfree(userptr->sgt);
}

/**
 * hl_userptr_delete_list() - clear userptr list.
 * @hdev: pointer to the habanalabs device structure.
 * @userptr_list: pointer to the list to clear.
 *
 * This function does the following:
 * - Iterates over the list and unpins the host memory and frees the userptr
 *   structure.
 */
void hl_userptr_delete_list(struct hl_device *hdev,
				struct list_head *userptr_list)
{
	struct hl_userptr *userptr, *tmp;

	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
		hl_unpin_host_memory(hdev, userptr);
		kfree(userptr);
	}

	INIT_LIST_HEAD(userptr_list);
}

/**
 * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
 * @hdev: pointer to the habanalabs device structure.
 * @addr: user address to check.
 * @size: user block size to check.
 * @userptr_list: pointer to the list to search in.
 * @userptr: pointer to userptr to check.
 *
 * This function does the following:
 * - Iterates over the list and checks whether the given address and size are
 *   in it, meaning the memory is pinned. If so, returns true, otherwise
 *   returns false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
				u32 size, struct list_head *userptr_list,
				struct hl_userptr **userptr)
{
	list_for_each_entry((*userptr), userptr_list, job_node) {
		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
			return true;
	}

	return false;
}

/**
 * va_range_init() - initialize virtual addresses range.
 * @hdev: pointer to the habanalabs device structure.
 * @va_ranges: pointer to va_ranges array.
 * @range_type: virtual address range type.
 * @start: range start address, inclusive.
 * @end: range end address, inclusive.
 * @page_size: page size for this va_range.
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
				enum hl_va_range_type range_type, u64 start,
				u64 end, u32 page_size)
{
	struct hl_va_range *va_range = va_ranges[range_type];
	int rc;

	INIT_LIST_HEAD(&va_range->list);

	/*
	 * PAGE_SIZE alignment
	 * it is the caller's responsibility to align the addresses if the
	 * page size is not a power of 2
	 */

	if (is_power_of_2(page_size)) {
		if (start & (PAGE_SIZE - 1)) {
			start &= PAGE_MASK;
			start += PAGE_SIZE;
		}

		/*
		 * The end of the range is inclusive, hence we need to align it
		 * to the end of the last full page in the range. For example if
		 * end = 0x3ff5 with page size 0x1000, we need to align it to
		 * 0x2fff. The remaining 0xff5 bytes do not form a full page.
		 */
		if ((end + 1) & (PAGE_SIZE - 1))
			end = ((end + 1) & PAGE_MASK) - 1;
	}

	if (start >= end) {
		dev_err(hdev->dev, "too small vm range for va list\n");
		return -EFAULT;
	}

	rc = add_va_block(hdev, va_range, start, end);

	if (rc) {
		dev_err(hdev->dev, "Failed to init host va list\n");
		return rc;
	}

	va_range->start_addr = start;
	va_range->end_addr = end;
	va_range->page_size = page_size;

	return 0;
}

/**
 * va_range_fini() - clear a virtual addresses range.
 * @hdev: pointer to the habanalabs structure.
 * @va_range: pointer to virtual addresses range.
 *
 * This function does the following:
 * - Frees the virtual addresses block list and its lock.
 */
static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
{
	mutex_lock(&va_range->lock);
	clear_va_list_locked(hdev, &va_range->list);
	mutex_unlock(&va_range->lock);

	mutex_destroy(&va_range->lock);
	kfree(va_range);
}

/**
 * vm_ctx_init_with_ranges() - initialize virtual memory for context.
 * @ctx: pointer to the habanalabs context structure.
 * @host_range_start: host virtual addresses range start.
 * @host_range_end: host virtual addresses range end.
 * @host_page_size: host page size.
 * @host_huge_range_start: host virtual addresses range start for memory
 *                         allocated with huge pages.
 * @host_huge_range_end: host virtual addresses range end for memory allocated
 *                       with huge pages.
 * @host_huge_page_size: host huge page size.
 * @dram_range_start: dram virtual addresses range start.
 * @dram_range_end: dram virtual addresses range end.
 * @dram_page_size: dram page size.
 *
 * This function initializes the following:
 * - MMU for context.
 * - Virtual address to area descriptor hashtable.
 * - Virtual block list of available virtual memory.
 */
static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
					u64 host_range_start,
					u64 host_range_end,
					u32 host_page_size,
					u64 host_huge_range_start,
					u64 host_huge_range_end,
					u32 host_huge_page_size,
					u64 dram_range_start,
					u64 dram_range_end,
					u32 dram_page_size)
{
	struct hl_device *hdev = ctx->hdev;
	int i, rc;

	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
		ctx->va_range[i] =
			kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
		if (!ctx->va_range[i]) {
			rc = -ENOMEM;
			goto free_va_range;
		}
	}

	rc = hl_mmu_ctx_init(ctx);
	if (rc) {
		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
		goto free_va_range;
	}

	mutex_init(&ctx->mem_hash_lock);
	hash_init(ctx->mem_hash);

	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);

	rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_HOST,
			host_range_start, host_range_end, host_page_size);
	if (rc) {
		dev_err(hdev->dev, "failed to init host vm range\n");
		goto mmu_ctx_fini;
	}

	if (hdev->pmmu_huge_range) {
		mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);

		rc = va_range_init(hdev,
			ctx->va_range, HL_VA_RANGE_TYPE_HOST_HUGE,
			host_huge_range_start, host_huge_range_end,
			host_huge_page_size);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init host huge vm range\n");
			goto clear_host_va_range;
		}
	} else {
		kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
		ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
				ctx->va_range[HL_VA_RANGE_TYPE_HOST];
	}

	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);

	rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_DRAM,
			dram_range_start, dram_range_end, dram_page_size);
	if (rc) {
		dev_err(hdev->dev, "failed to init dram vm range\n");
		goto clear_host_huge_va_range;
	}

	hl_debugfs_add_ctx_mem_hash(hdev, ctx);

	return 0;

clear_host_huge_va_range:
	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);

	if (hdev->pmmu_huge_range) {
		mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
		clear_va_list_locked(hdev,
			&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
		mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
	}
clear_host_va_range:
	if (hdev->pmmu_huge_range)
		mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
	mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
	clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
	mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
mmu_ctx_fini:
	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);
free_va_range:
	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
		kfree(ctx->va_range[i]);

	return rc;
}

int hl_vm_ctx_init(struct hl_ctx *ctx)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 host_range_start, host_range_end, host_huge_range_start,
		host_huge_range_end, dram_range_start, dram_range_end;
	u32 host_page_size, host_huge_page_size, dram_page_size;

	atomic64_set(&ctx->dram_phys_mem, 0);

	/*
	 * - If MMU is enabled, init the ranges as usual.
	 * - If MMU is disabled, in case of host mapping, the returned address
	 *   is the given one.
	 *   In case of DRAM mapping, the returned address is the physical
	 *   address of the memory related to the given handle.
	 */
	if (!ctx->hdev->mmu_enable)
		return 0;

	dram_range_start = prop->dmmu.start_addr;
	dram_range_end = prop->dmmu.end_addr - 1;
	dram_page_size = prop->dram_page_size ?
				prop->dram_page_size : prop->dmmu.page_size;
	host_range_start = prop->pmmu.start_addr;
	host_range_end = prop->pmmu.end_addr - 1;
	host_page_size = prop->pmmu.page_size;
	host_huge_range_start = prop->pmmu_huge.start_addr;
	host_huge_range_end = prop->pmmu_huge.end_addr - 1;
	host_huge_page_size = prop->pmmu_huge.page_size;

	return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
			host_page_size, host_huge_range_start,
			host_huge_range_end, host_huge_page_size,
			dram_range_start, dram_range_end, dram_page_size);
}
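
/*
 * Illustrative sketch (the numbers are hypothetical, not taken from any
 * specific ASIC): hl_vm_ctx_init() only consumes whatever the ASIC-specific
 * code has already written into asic_fixed_properties, e.g.:
 *
 *	prop->pmmu.start_addr = 0x8000000000ull;
 *	prop->pmmu.end_addr = 0x10000000000ull;
 *	prop->pmmu.page_size = SZ_4K;
 *	prop->pmmu_huge = prop->pmmu;
 *	prop->pmmu_huge.page_size = SZ_2M;
 *	prop->dmmu.start_addr = 0x20000000ull;
 *	prop->dmmu.end_addr = 0x1000000000ull;
 *	prop->dmmu.page_size = SZ_2M;
 *
 * Each HL_VA_RANGE_TYPE_* range of the context is then built from these
 * boundaries; end_addr is treated as exclusive, hence the "- 1" above.
 */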

/**
 * hl_vm_ctx_fini() - virtual memory teardown of context.
 * @ctx: pointer to the habanalabs context structure.
 *
 * This function performs the following teardown:
 * - Virtual block list of available virtual memory.
 * - Virtual address to area descriptor hashtable.
 * - MMU for context.
 *
 * In addition, this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
 * - Frees any existing physical page list from the idr which relates to the
 *   current context asid.
 * - Checks the virtual block list for correctness. At this point the list
 *   should contain one element which describes the whole virtual memory range
 *   of the context. Otherwise, a warning is printed.
 */
void hl_vm_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_vm_phys_pg_pack *phys_pg_list, *tmp_phys_node;
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_hash_node *hnode;
	struct hl_vm *vm = &hdev->vm;
	struct hlist_node *tmp_node;
	struct list_head free_list;
	struct hl_mem_in args;
	int i;

	if (!hdev->mmu_enable)
		return;

	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);

	/*
	 * If a hard reset is pending, something has already gone wrong, so
	 * there is no point in printing another error for this side effect.
	 */
	if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
		dev_dbg(hdev->dev,
			"user released device without removing its memory mappings\n");

	hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
		dev_dbg(hdev->dev,
			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
			hnode->vaddr, ctx->asid);
		args.unmap.device_virt_addr = hnode->vaddr;
		unmap_device_va(ctx, &args, true);
	}

	mutex_lock(&ctx->mmu_lock);

	/* invalidate the cache once after the unmapping loop */
	hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
	hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);

	mutex_unlock(&ctx->mmu_lock);

	INIT_LIST_HEAD(&free_list);

	spin_lock(&vm->idr_lock);
	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
		if (phys_pg_list->asid == ctx->asid) {
			dev_dbg(hdev->dev,
				"page list 0x%px of asid %d is still alive\n",
				phys_pg_list, ctx->asid);

			atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
			idr_remove(&vm->phys_pg_pack_handles, i);
			list_add(&phys_pg_list->node, &free_list);
		}
	spin_unlock(&vm->idr_lock);

	list_for_each_entry_safe(phys_pg_list, tmp_phys_node, &free_list, node)
		free_phys_pg_pack(hdev, phys_pg_list);

	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);

	if (hdev->pmmu_huge_range)
		va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);

	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);

	/*
	 * In this case we need to clear the global accounting of DRAM usage
	 * because the user notifies us on allocations. If the user process is
	 * gone, all DRAM is available again.
	 */
	if (ctx->asid != HL_KERNEL_ASID_ID &&
			!hdev->asic_prop.dram_supports_virtual_memory)
		atomic64_set(&hdev->dram_used_mem, 0);
}
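
/*
 * Design note on the free_list two-phase teardown above: leftover physical
 * page packs are only unlinked from the idr while vm->idr_lock (a spinlock)
 * is held, and are actually released afterwards, outside the lock, since the
 * free path may sleep. The same pattern as a generic sketch:
 *
 *	LIST_HEAD(free_list);
 *
 *	spin_lock(&lock);
 *	// detach matching entries; no freeing while atomic
 *	list_add(&entry->node, &free_list);
 *	spin_unlock(&lock);
 *
 *	list_for_each_entry_safe(entry, tmp, &free_list, node)
 *		release(entry);		// may sleep safely here
 */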

/**
 * hl_vm_init() - initialize virtual memory module.
 * @hdev: pointer to the habanalabs device structure.
 *
 * This function initializes the following:
 * - MMU module.
 * - DRAM physical pages pool with the DRAM page size as its granularity
 *   (8MB pool pages are used when the DRAM page size is not a power of 2).
 * - Idr for device memory allocation handles.
 */
int hl_vm_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm *vm = &hdev->vm;
	int rc;

	if (is_power_of_2(prop->dram_page_size))
		vm->dram_pg_pool =
			gen_pool_create(__ffs(prop->dram_page_size), -1);
	else
		vm->dram_pg_pool =
			gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);

	if (!vm->dram_pg_pool) {
		dev_err(hdev->dev, "Failed to create dram page pool\n");
		return -ENOMEM;
	}

	kref_init(&vm->dram_pg_pool_refcount);

	rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
			prop->dram_end_address - prop->dram_user_base_address,
			-1);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to dram page pool %d\n", rc);
		goto pool_add_err;
	}

	spin_lock_init(&vm->idr_lock);
	idr_init(&vm->phys_pg_pack_handles);

	atomic64_set(&hdev->dram_used_mem, 0);

	vm->init_done = true;

	return 0;

pool_add_err:
	gen_pool_destroy(vm->dram_pg_pool);

	return rc;
}
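
/*
 * Minimal usage sketch for vm->dram_pg_pool (illustrative only - the real
 * allocation path lives elsewhere in this file and layers handles, refcounts
 * and accounting on top of this):
 *
 *	unsigned long paddr;
 *
 *	// carve one DRAM page out of the pool created by hl_vm_init()
 *	paddr = gen_pool_alloc(hdev->vm.dram_pg_pool,
 *				hdev->asic_prop.dram_page_size);
 *	if (!paddr)
 *		return -ENOMEM;
 *
 *	// ... map it for the user, then return it to the pool on teardown
 *	gen_pool_free(hdev->vm.dram_pg_pool, paddr,
 *				hdev->asic_prop.dram_page_size);
 *
 * gen_pool_alloc()/gen_pool_free() are the standard kernel genalloc API; the
 * minimum allocation order chosen above dictates the pool's granularity.
 */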

/**
 * hl_vm_fini() - virtual memory module teardown.
 * @hdev: pointer to the habanalabs device structure.
 *
 * This function performs teardown of the following:
 * - Idr for device memory allocation handles.
 * - DRAM physical pages pool.
 * - MMU module.
 */
void hl_vm_fini(struct hl_device *hdev)
{
	struct hl_vm *vm = &hdev->vm;

	if (!vm->init_done)
		return;

	/*
	 * At this point all the contexts should be freed and hence no DRAM
	 * memory should be in use. Hence the DRAM pool should be freed here.
	 */
	if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
		dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
				__func__);

	vm->init_done = false;
}
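
/*
 * Refcount pairing behind the kref_put() above (informational sketch): the
 * pool starts with a single reference from kref_init() in hl_vm_init(), and
 * the device-memory alloc/free paths are expected to take and drop one extra
 * reference per live DRAM allocation, roughly:
 *
 *	// on allocation
 *	kref_get(&vm->dram_pg_pool_refcount);
 *	// on free
 *	kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release);
 *
 * If anything besides the initial reference is still held here, kref_put()
 * does not release the pool (it returns 0) and the warning above fires.
 */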

/**
 * hl_hw_block_mem_init() - HW block memory initialization.
 * @ctx: pointer to the habanalabs context structure.
 *
 * This function initializes the HW block virtual mapped addresses list and
 * its lock.
 */
void hl_hw_block_mem_init(struct hl_ctx *ctx)
{
	mutex_init(&ctx->hw_block_list_lock);
	INIT_LIST_HEAD(&ctx->hw_block_mem_list);
}

/**
 * hl_hw_block_mem_fini() - HW block memory teardown.
 * @ctx: pointer to the habanalabs context structure.
 *
 * This function clears the HW block virtual mapped addresses list and destroys
 * its lock.
 */
void hl_hw_block_mem_fini(struct hl_ctx *ctx)
{
	struct hl_vm_hw_block_list_node *lnode, *tmp;

	if (!list_empty(&ctx->hw_block_mem_list))
		dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");

	list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
		list_del(&lnode->node);
		kfree(lnode);
	}

	mutex_destroy(&ctx->hw_block_list_lock);
}
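
/*
 * Producer-side sketch for hw_block_mem_list (illustrative only; in the real
 * driver the HW block mmap path is what populates this list): code that hands
 * a mapped HW block to user space is expected to track it along these lines,
 * so that hl_hw_block_mem_fini() above can reclaim whatever is left when the
 * context goes away:
 *
 *	struct hl_vm_hw_block_list_node *lnode;
 *
 *	lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
 *	if (!lnode)
 *		return -ENOMEM;
 *
 *	mutex_lock(&ctx->hw_block_list_lock);
 *	list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
 *	mutex_unlock(&ctx->hw_block_list_lock);
 */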