habanalabs: fix MMU number of pages calculation

The requested allocation size is 64-bit, hence the number of requested
pages and the total requested size should be 64-bit as well.
This patch fixes all the places where these values are treated as 32-bit.
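
For illustration only (not from the driver): a minimal userspace sketch of
the truncation class being fixed. With hypothetical 2 MiB pages, a 5 GiB
request produces a byte count that needs 33 bits, so a u32 total size
silently wraps; all names and constants below are invented for the example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t req_size = 5ULL << 30;        /* 5 GiB allocation request */
    unsigned int page_shift = 21;          /* 2 MiB pages */
    /* round the request up to whole pages, in 64 bits */
    uint64_t num_pgs = (req_size + (1ULL << page_shift) - 1) >> page_shift;

    uint32_t total_size32 = num_pgs << page_shift; /* wraps to 1 GiB */
    uint64_t total_size64 = num_pgs << page_shift; /* keeps 5 GiB */

    printf("num_pgs   = %llu\n", (unsigned long long)num_pgs);
    printf("u32 total = %u\n", total_size32);      /* 1073741824 */
    printf("u64 total = %llu\n", (unsigned long long)total_size64);
    return 0;
}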

Signed-off-by: Omer Shpigelman <oshpigelman@habana.ai>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
Commit: bfb1ce1259 (parent: 8c2ffd9174)
Author: Omer Shpigelman, 2019-03-05 10:59:16 +02:00; committed by Oded Gabbay
3 changed files with 23 additions and 21 deletions

drivers/misc/habanalabs/debugfs.c

@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
     struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
     enum vm_type_t *vm_type;
     bool once = true;
+    u64 j;
     int i;

     if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
         } else {
             phys_pg_pack = hnode->ptr;
             seq_printf(s,
-                " 0x%-14llx %-10u %-4u\n",
+                " 0x%-14llx %-10llu %-4u\n",
                 hnode->vaddr, phys_pg_pack->total_size,
                 phys_pg_pack->handle);
         }
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
                 phys_pg_pack->page_size);
             seq_puts(s, " physical address\n");
             seq_puts(s, "---------------------\n");
-            for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+            for (j = 0 ; j < phys_pg_pack->npages ; j++) {
                 seq_printf(s, " 0x%-14llx\n",
-                        phys_pg_pack->pages[i]);
+                        phys_pg_pack->pages[j]);
             }
         }
         spin_unlock(&vm->idr_lock);
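
The seq_printf change above is the companion to the type change: once
total_size is u64, it must be printed with a 64-bit conversion. A standalone
sketch of the format/argument mismatch (userspace printf, invented value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t total_size = 5368709120ULL;   /* 5 GiB, needs 33 bits */

    /* correct: 64-bit conversion matches the 64-bit argument */
    printf("total_size = %llu\n", (unsigned long long)total_size);
    /* "%u" here would read only 32 bits of the argument and
     * misalign every conversion that follows it. */
    return 0;
}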

drivers/misc/habanalabs/habanalabs.h

@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
  * struct hl_vm_phys_pg_pack - physical page pack.
  * @vm_type: describes the type of the virtual area descriptor.
  * @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
  * @mapping_cnt: number of shared mappings.
  * @asid: the context related to this list.
- * @npages: num physical pages in the pack.
  * @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
  * @flags: HL_MEM_* flags related to this list.
  * @handle: the provided handle related to this list.
  * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
 struct hl_vm_phys_pg_pack {
     enum vm_type_t vm_type; /* must be first */
     u64 *pages;
+    u64 npages;
+    u64 total_size;
     atomic_t mapping_cnt;
     u32 asid;
-    u32 npages;
     u32 page_size;
-    u32 total_size;
     u32 flags;
     u32 handle;
     u32 offset;
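
Beyond widening npages and total_size, the hunk above also moves them up
beside pages. The commit does not say why, but a plausible reason is
structure packing: interleaving 8-byte members among 4-byte ones creates
padding holes on a typical LP64 ABI. A standalone sketch (field names
borrowed from the struct, both layouts invented for comparison):

#include <stdio.h>
#include <stdint.h>

/* 8-byte members scattered among 4-byte ones: two padding holes */
struct interleaved {
    uint32_t asid;
    uint64_t npages;
    uint32_t page_size;
    uint64_t total_size;
};

/* 8-byte members grouped first: no holes */
struct grouped {
    uint64_t npages;
    uint64_t total_size;
    uint32_t asid;
    uint32_t page_size;
};

int main(void)
{
    printf("interleaved: %zu bytes\n", sizeof(struct interleaved)); /* 32 */
    printf("grouped:     %zu bytes\n", sizeof(struct grouped));     /* 24 */
    return 0;
}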

drivers/misc/habanalabs/memory.c

@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
     struct hl_device *hdev = ctx->hdev;
     struct hl_vm *vm = &hdev->vm;
     struct hl_vm_phys_pg_pack *phys_pg_pack;
-    u64 paddr = 0;
-    u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
-    int handle, rc, i;
+    u64 paddr = 0, total_size, num_pgs, i;
+    u32 num_curr_pgs, page_size, page_shift;
+    int handle, rc;
     bool contiguous;

     num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
         paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
         if (!paddr) {
             dev_err(hdev->dev,
-                "failed to allocate %u huge contiguous pages\n",
+                "failed to allocate %llu huge contiguous pages\n",
                 num_pgs);
             return -ENOMEM;
         }
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
         struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
     struct hl_vm *vm = &hdev->vm;
-    int i;
+    u64 i;

     if (!phys_pg_pack->created_from_userptr) {
         if (phys_pg_pack->contiguous) {
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
  * - Return the start address of the virtual block
  */
 static u64 get_va_block(struct hl_device *hdev,
-        struct hl_va_range *va_range, u32 size, u64 hint_addr,
+        struct hl_va_range *va_range, u64 size, u64 hint_addr,
         bool is_userptr)
 {
     struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
     }

     if (!new_va_block) {
-        dev_err(hdev->dev, "no available va block for size %u\n", size);
+        dev_err(hdev->dev, "no available va block for size %llu\n",
+            size);
         goto out;
     }

@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
     struct hl_vm_phys_pg_pack *phys_pg_pack;
     struct scatterlist *sg;
     dma_addr_t dma_addr;
-    u64 page_mask;
-    u32 npages, total_npages, page_size = PAGE_SIZE;
+    u64 page_mask, total_npages;
+    u32 npages, page_size = PAGE_SIZE;
     bool first = true, is_huge_page_opt = true;
     int rc, i, j;

@@ -750,9 +751,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
         struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
     struct hl_device *hdev = ctx->hdev;
-    u64 next_vaddr = vaddr, paddr;
+    u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
     u32 page_size = phys_pg_pack->page_size;
-    int i, rc = 0, mapped_pg_cnt = 0;
+    int rc = 0;

     for (i = 0 ; i < phys_pg_pack->npages ; i++) {
         paddr = phys_pg_pack->pages[i];
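
The loop above is why the iterator itself is widened: with npages now u64, a
32-bit counter can wrap before the termination test ever fails (and with a
signed int the overflow is undefined behavior). A standalone sketch with
invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t npages = (1ULL << 32) + 5; /* more pages than a u32 can count */
    uint32_t i = UINT32_MAX;            /* 32-bit iterator at its ceiling */

    i++; /* wraps to 0 instead of advancing toward npages */
    printf("i after ++ : %u\n", i);                    /* 0 */
    printf("i < npages : %d\n", (uint64_t)i < npages); /* 1 -> loop never ends */
    return 0;
}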
@@ -764,7 +765,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
         rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
         if (rc) {
             dev_err(hdev->dev,
-                "map failed for handle %u, npages: %d, mapped: %d",
+                "map failed for handle %u, npages: %llu, mapped: %llu",
                 phys_pg_pack->handle, phys_pg_pack->npages,
                 mapped_pg_cnt);
             goto err;
@@ -985,10 +986,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
     struct hl_vm_hash_node *hnode = NULL;
     struct hl_userptr *userptr = NULL;
     enum vm_type_t *vm_type;
-    u64 next_vaddr;
+    u64 next_vaddr, i;
     u32 page_size;
     bool is_userptr;
-    int i, rc;
+    int rc;

     /* protect from double entrance */
     mutex_lock(&ctx->mem_hash_lock);