ALSA: emu10k1: make sure synth DMA pages are allocated with DMA functions
Commit a5003fc041
("[ALSA] emu10k1 - simplify page allocation for synth")
switched from using the DMA allocator for synth DMA pages to manually
calling alloc_page().
However, this usage has an implicit assumption that the DMA address space
for the emu10k1-family chip is the same as the CPU physical address space,
which is not true for a system with an IOMMU.
Since this made the synth part of the driver non-functional on such systems,
let's effectively revert that commit (while keeping the
__synth_free_pages() simplification).
Signed-off-by: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
commit 055e0ae10f
parent 541b9bad16
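As a side note before the diff: the core of the problem is that page_to_phys() yields a CPU physical address, while the address a PCI device has to use can be remapped by an IOMMU and therefore must come from the DMA API. The fragment below is a minimal, standalone sketch of that distinction only; it is not part of the patch (which goes through ALSA's snd_dma_alloc_pages() helpers instead), and the sketch_alloc_one_page() helper and its struct device argument are purely illustrative.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only: allocate one page and obtain the address the device
 * must use for it.  page_to_phys(p) would give the CPU physical address,
 * which is what the old emu10k1 synth code programmed into the chip; with
 * an IOMMU in the path the device-visible address can differ, so it has
 * to come from the DMA mapping API instead. */
static int sketch_alloc_one_page(struct device *dev,
				 void **cpu_ptr, dma_addr_t *dma_addr)
{
	struct page *p = alloc_page(GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	*dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		__free_page(p);
		return -ENOMEM;
	}

	*cpu_ptr = page_address(p);
	return 0;	/* pair with dma_unmap_page() + __free_page() later */
}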
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -461,10 +461,19 @@ static void get_single_page_range(struct snd_util_memhdr *hdr,
 static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
 			       int last_page)
 {
+	struct snd_dma_buffer dmab;
 	int page;
 
+	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
+	dmab.dev.dev = snd_dma_pci_data(emu->pci);
+
 	for (page = first_page; page <= last_page; page++) {
-		free_page((unsigned long)emu->page_ptr_table[page]);
+		if (emu->page_ptr_table[page] == NULL)
+			continue;
+		dmab.area = emu->page_ptr_table[page];
+		dmab.addr = emu->page_addr_table[page];
+		dmab.bytes = PAGE_SIZE;
+		snd_dma_free_pages(&dmab);
 		emu->page_addr_table[page] = 0;
 		emu->page_ptr_table[page] = NULL;
 	}
@@ -476,30 +485,31 @@ static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
 static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
 {
 	int page, first_page, last_page;
+	struct snd_dma_buffer dmab;
 
 	emu10k1_memblk_init(blk);
 	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
 	/* allocate kernel pages */
 	for (page = first_page; page <= last_page; page++) {
-		/* first try to allocate from <4GB zone */
-		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
-					    __GFP_NOWARN);
-		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
-			if (p)
-				__free_page(p);
-			/* try to allocate from <16MB zone */
-			p = alloc_page(GFP_ATOMIC | GFP_DMA |
-				       __GFP_NORETRY | /* no OOM-killer */
-				       __GFP_NOWARN);
+		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
+					snd_dma_pci_data(emu->pci),
+					PAGE_SIZE, &dmab) < 0)
+			goto __fail;
+		if (!is_valid_page(emu, dmab.addr)) {
+			snd_dma_free_pages(&dmab);
+			goto __fail;
 		}
-		if (!p) {
-			__synth_free_pages(emu, first_page, page - 1);
-			return -ENOMEM;
-		}
-		emu->page_addr_table[page] = page_to_phys(p);
-		emu->page_ptr_table[page] = page_address(p);
+		emu->page_addr_table[page] = dmab.addr;
+		emu->page_ptr_table[page] = dmab.area;
 	}
 	return 0;
+
+__fail:
+	/* release allocated pages */
+	last_page = page - 1;
+	__synth_free_pages(emu, first_page, last_page);
+
+	return -ENOMEM;
 }
 
 /*
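For reference, this is what the allocation/free pairing the patch switches to looks like in isolation. It is only a sketch under stated assumptions: emu_pci stands in for the driver's struct pci_dev, sketch_dma_page_roundtrip() is a made-up helper, and the real driver stores dmab.addr and dmab.area in its page tables rather than freeing the page immediately.

#include <linux/pci.h>
#include <sound/memalloc.h>

/* Illustrative only: allocate one DMA-capable page the way the patched
 * synth code does, then release it again with the matching helper. */
static int sketch_dma_page_roundtrip(struct pci_dev *emu_pci)
{
	struct snd_dma_buffer dmab;

	/* snd_dma_alloc_pages() fills dmab.area (kernel virtual address)
	 * and dmab.addr (the address the device must use). */
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				snd_dma_pci_data(emu_pci),
				PAGE_SIZE, &dmab) < 0)
		return -ENOMEM;

	/* ... a driver would program dmab.addr into its hardware page
	 * table here ... */

	snd_dma_free_pages(&dmab);
	return 0;
}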