habanalabs: fix wrong VA size calculation
VA blocks are currently stored in an inconsistent way: sometimes a block's end address is inclusive, sometimes exclusive. This leads to wrong size calculations in certain cases, and could lead to a segmentation fault if the mapping process fails in the middle and we try to roll it back. Make the convention consistent: both block start and block end are inclusive. For example, the regions table may now look like this:

	0x0000 - 0x1fff : allocated
	0x2000 - 0x2fff : free
	0x3000 - 0x3fff : allocated

Signed-off-by: Yuri Nudelman <ynudelman@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
commit f06bad02b5 (parent 90d283b672)
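With both ends inclusive, every size computation in the hunks below picks up a "+ 1": a block spanning [start, end] holds end - start + 1 bytes. A minimal user-space sketch of the convention (the struct and helper here are illustrative, not the driver's types):

	#include <assert.h>
	#include <stdint.h>

	struct va_block {
		uint64_t start;	/* inclusive */
		uint64_t end;	/* inclusive */
	};

	/* With an inclusive end, the byte count needs the extra "+ 1". */
	static uint64_t block_size(const struct va_block *b)
	{
		return b->end - b->start + 1;
	}

	int main(void)
	{
		struct va_block b = { .start = 0x2000, .end = 0x2fff };

		assert(block_size(&b) == 0x1000);	/* exactly one 4 KiB page */
		return 0;
	}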
@@ -57,7 +57,7 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 		}
 
 		va_block->start = virt_addr;
-		va_block->end = virt_addr + page_size;
+		va_block->end = virt_addr + page_size - 1;
 		va_block->size = page_size;
 		list_add_tail(&va_block->node, &cb->va_block_list);
 	}
@@ -2757,21 +2757,9 @@ static inline bool hl_mem_area_inside_range(u64 address, u64 size,
 static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
 				u64 range_start_address, u64 range_end_address)
 {
-	u64 end_address = address + size;
+	u64 end_address = address + size - 1;
 
-	if ((address >= range_start_address) &&
-			(address < range_end_address))
-		return true;
-
-	if ((end_address >= range_start_address) &&
-			(end_address < range_end_address))
-		return true;
-
-	if ((address < range_start_address) &&
-			(end_address >= range_end_address))
-		return true;
-
-	return false;
+	return ((address <= range_end_address) && (range_start_address <= end_address));
 }
 
 int hl_device_open(struct inode *inode, struct file *filp);
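The rewritten predicate relies on a standard identity: two inclusive intervals [a0, a1] and [b0, b1] intersect iff a0 <= b1 and b0 <= a1, which collapses the three-branch version (and its half-open comparisons) into one expression. A small stand-alone check of that identity (illustrative, not driver code):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Inclusive-interval overlap: same shape as the new return statement. */
	static bool ranges_overlap(uint64_t a0, uint64_t a1,
				   uint64_t b0, uint64_t b1)
	{
		return (a0 <= b1) && (b0 <= a1);
	}

	int main(void)
	{
		assert(ranges_overlap(0x1000, 0x1fff, 0x1fff, 0x2fff));	/* share one byte */
		assert(!ranges_overlap(0x1000, 0x1fff, 0x2000, 0x2fff));	/* adjacent, disjoint */
		return 0;
	}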
@@ -477,7 +477,7 @@ static int add_va_block_locked(struct hl_device *hdev,
 				struct list_head *va_list, u64 start, u64 end)
 {
 	struct hl_vm_va_block *va_block, *res = NULL;
-	u64 size = end - start;
+	u64 size = end - start + 1;
 
 	print_va_list_locked(hdev, va_list);
 
@@ -644,7 +644,7 @@ static u64 get_va_block(struct hl_device *hdev,
 			continue;
 		}
 
-		valid_size = va_block->end - valid_start;
+		valid_size = va_block->end - valid_start + 1;
 		if (valid_size < size)
 			continue;
 
@@ -707,7 +707,7 @@ static u64 get_va_block(struct hl_device *hdev,
 
 	if (new_va_block->size > size) {
 		new_va_block->start += size;
-		new_va_block->size = new_va_block->end - new_va_block->start;
+		new_va_block->size = new_va_block->end - new_va_block->start + 1;
 	} else {
 		list_del(&new_va_block->node);
 		kfree(new_va_block);
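When an allocation is carved off the front of a larger free block, only start moves; with an inclusive end the leftover size must be recomputed as end - start + 1, exactly as the hunk above does. A stand-alone sketch of the split (illustrative types, not the driver's hl_vm_va_block):

	#include <assert.h>
	#include <stdint.h>

	struct block {
		uint64_t start;	/* inclusive */
		uint64_t end;	/* inclusive */
		uint64_t size;
	};

	/* Carve `size` bytes off the front; the inclusive end stays put. */
	static void carve_front(struct block *b, uint64_t size)
	{
		b->start += size;
		b->size = b->end - b->start + 1;
	}

	int main(void)
	{
		struct block b = { .start = 0x0000, .end = 0x3fff, .size = 0x4000 };

		carve_front(&b, 0x1000);
		assert(b.start == 0x1000 && b.size == 0x3000);
		return 0;
	}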
@@ -2388,8 +2388,14 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
 			start += PAGE_SIZE;
 		}
 
-		if (end & (PAGE_SIZE - 1))
-			end &= PAGE_MASK;
+		/*
+		 * The end of the range is inclusive, hence we need to align it
+		 * to the end of the last full page in the range. For example if
+		 * end = 0x3ff5 with page size 0x1000, we need to align it to
+		 * 0x2fff. The remaining 0xff5 bytes do not form a full page.
+		 */
+		if ((end + 1) & (PAGE_SIZE - 1))
+			end = ((end + 1) & PAGE_MASK) - 1;
 	}
 
 	if (start >= end) {
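Aligning an inclusive end down to the last full page works by converting to an exclusive bound (end + 1), masking, and converting back (- 1). A quick stand-alone check of the arithmetic from the new comment (illustrative macros, not the kernel's PAGE_SIZE/PAGE_MASK):

	#include <assert.h>
	#include <stdint.h>

	#define MY_PAGE_SIZE	0x1000ULL
	#define MY_PAGE_MASK	(~(MY_PAGE_SIZE - 1))

	/* Same expression as the hunk above, applied to an inclusive end. */
	static uint64_t align_end_inclusive(uint64_t end)
	{
		if ((end + 1) & (MY_PAGE_SIZE - 1))
			end = ((end + 1) & MY_PAGE_MASK) - 1;
		return end;
	}

	int main(void)
	{
		assert(align_end_inclusive(0x3ff5) == 0x2fff);	/* partial page dropped */
		assert(align_end_inclusive(0x3fff) == 0x3fff);	/* already page-aligned */
		return 0;
	}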
@@ -2564,14 +2570,14 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
 		return 0;
 
 	dram_range_start = prop->dmmu.start_addr;
-	dram_range_end = prop->dmmu.end_addr;
+	dram_range_end = prop->dmmu.end_addr - 1;
 	dram_page_size = prop->dram_page_size ?
 				prop->dram_page_size : prop->dmmu.page_size;
 	host_range_start = prop->pmmu.start_addr;
-	host_range_end = prop->pmmu.end_addr;
+	host_range_end = prop->pmmu.end_addr - 1;
 	host_page_size = prop->pmmu.page_size;
 	host_huge_range_start = prop->pmmu_huge.start_addr;
-	host_huge_range_end = prop->pmmu_huge.end_addr;
+	host_huge_range_end = prop->pmmu_huge.end_addr - 1;
 	host_huge_page_size = prop->pmmu_huge.page_size;
 
 	return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,