ALSA: memalloc: Count continuous pages in vmalloc buffer handler
This is an enhancement of the SG-style page handling in the vmalloc buffer handler to count the continuous pages. When snd_sgbuf_get_chunk_size() is called for a vmalloc buffer, we currently return only the size that fits into a single page. However, this API call is really meant for obtaining the continuous pages, and most vmalloc or noncontig buffers do in fact contain many continuous pages. So, with this patch, the callback now calculates the possibly continuous pages up to the given size limit.

Note that the end address in the function is calculated from the last byte, hence it's one byte shorter; this is because ofs + size can be above the actual buffer size boundary.

This feature isn't really used yet, but it'll become useful in a later patch that adds the non-contiguous buffer type sharing the same callback function as vmalloc.

Link: https://lore.kernel.org/r/20210812113818.6479-1-tiwai@suse.de
Link: https://lore.kernel.org/r/20210813081645.4680-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
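As an illustration of the new calculation, below is a minimal userspace sketch that mirrors the logic of the reworked snd_dma_vmalloc_get_chunk_size(); the fake_page_addr() table, the 4 KiB PAGE_SIZE, and the sample offsets are assumptions made up for the example, not part of the patch:

#include <stdio.h>

#define PAGE_SIZE	4096u
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

/* hypothetical physical address of the page backing each buffer offset;
 * pages 0 and 1 are continuous, page 2 is not */
static unsigned long fake_page_addr(unsigned int ofs)
{
	static const unsigned long pages[] = { 0x10000, 0x11000, 0x40000 };
	return pages[ofs / PAGE_SIZE];
}

/* mirrors the new snd_dma_vmalloc_get_chunk_size() logic */
static unsigned int chunk_size(unsigned int ofs, unsigned int size)
{
	unsigned int start = ALIGN_DOWN(ofs, PAGE_SIZE);
	unsigned int end = ofs + size - 1;	/* the last byte address */
	unsigned long addr = fake_page_addr(start);

	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (fake_page_addr(start) != addr)
			return start - ofs;
	}
	return size;	/* all on continuous pages */
}

int main(void)
{
	/* spans pages 0-1 (continuous): the full 6000 bytes are returned */
	printf("%u\n", chunk_size(100, 6000));
	/* spans pages 1-2 (discontinuous): stops at the page boundary (3992) */
	printf("%u\n", chunk_size(4200, 6000));
	return 0;
}

When the pages are continuous, the full requested size comes back; once the page chain breaks, the chunk stops at the boundary, which is roughly what the old code returned for every call.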
commit bda36b0fc2
parent 1a04830169
sound/core/memalloc.c
@@ -290,11 +290,13 @@ static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
 	return remap_vmalloc_range(area, dmab->area, 0);
 }
 
+#define get_vmalloc_page_addr(dmab, offset) \
+	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
+
 static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
 					   size_t offset)
 {
-	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
-		offset % PAGE_SIZE;
+	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
 }
 
 static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
@@ -307,11 +309,23 @@ static unsigned int
 snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
 {
-	ofs %= PAGE_SIZE;
-	size += ofs;
-	if (size > PAGE_SIZE)
-		size = PAGE_SIZE;
-	return size - ofs;
+	unsigned int start, end;
+	unsigned long addr;
+
+	start = ALIGN_DOWN(ofs, PAGE_SIZE);
+	end = ofs + size - 1; /* the last byte address */
+	/* check page continuity */
+	addr = get_vmalloc_page_addr(dmab, start);
+	for (;;) {
+		start += PAGE_SIZE;
+		if (start > end)
+			break;
+		addr += PAGE_SIZE;
+		if (get_vmalloc_page_addr(dmab, start) != addr)
+			return start - ofs;
+	}
+	/* ok, all on continuous pages */
+	return size;
 }
 
 static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
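To show why the larger chunks matter, here is a rough sketch of a caller walking a vmalloc-backed buffer through the generic helpers; snd_sgbuf_get_chunk_size() and snd_sgbuf_get_addr() are the existing ALSA wrappers that dispatch to these callbacks, while setup_one_descriptor() is a hypothetical placeholder for whatever per-chunk work a driver does, not an API from this patch:

#include <sound/memalloc.h>

static void walk_buffer(struct snd_dma_buffer *dmab, unsigned int bytes)
{
	unsigned int ofs = 0;

	while (ofs < bytes) {
		/* largest physically continuous run starting at ofs */
		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
							      bytes - ofs);
		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);

		/* one hardware descriptor per continuous chunk
		 * (hypothetical helper, for illustration only) */
		setup_one_descriptor(addr, chunk);
		ofs += chunk;
	}
}

With the old behavior such a loop iterated at most one page at a time; with the continuity check it can cover multi-page runs in a single step.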