ALSA: pcm: fix wait_time calculations
... in wait_for_avail() and snd_pcm_drain().

t was calculated in seconds, so it would be pretty much always zero, to be subsequently de-facto ignored due to being max(t, 10)'d. And then it (i.e., 10) would be treated as secs, which doesn't seem right.

However, fixing it to properly calculate msecs would potentially cause timeouts when using twice the period size for the default timeout (which seems reasonable to me), so instead use the buffer size plus 10 percent to be on the safe side ... but that still seems insufficient, presumably because the hardware typically needs a moment to fire up. To compensate for this, we up the minimal timeout to 100ms, which is still two orders of magnitude less than the bogus minimum.

substream->wait_time was also misinterpreted as jiffies, despite being documented as being in msecs. Only the soc/sof driver sets it - to 500, which looks very much like msecs were intended.

Speaking of which, shouldn't snd_pcm_drain() also use substream->wait_time?

As a drive-by, make the debug messages on timeout less confusing.

Signed-off-by: Oswald Buddenhagen <oswald.buddenhagen@gmx.de>
Link: https://lore.kernel.org/r/20230405201219.2197774-1-oswald.buddenhagen@gmx.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent 102882b5c6
commit 3ed2b549b3
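To make the arithmetic in the commit message concrete before reading the diff, here is a minimal userspace sketch of the old and new timeout formulas. The PCM parameters (1024-frame periods, a 16-period buffer, 48 kHz) are illustrative assumptions, not values taken from the patch:

    #include <stdio.h>

    /* Hypothetical but typical PCM parameters (assumptions, not from the patch). */
    #define PERIOD_SIZE 1024UL              /* frames per period */
    #define BUFFER_SIZE (16 * PERIOD_SIZE)  /* frames per buffer */
    #define RATE        48000UL             /* frames per second */

    int main(void)
    {
        /* Old formula: intended as seconds, so integer division truncates
         * to 0 for any period shorter than half a second. */
        long t_old = PERIOD_SIZE * 2 / RATE;     /* 2048 / 48000 == 0 */

        /* New formula: buffer length in msecs plus 10% headroom
         * (x1000 for msecs, x1.1 for slack => x1100). */
        long t_new = BUFFER_SIZE * 1100 / RATE;  /* ~375 ms here */

        printf("old: %ld (secs, truncated)  new: %ld ms\n", t_old, t_new);
        /* Both are then clamped from below: max(t, 10) before the patch,
         * max(t, 100) after, so the old default ended up as a flat 10,
         * which was then multiplied by 1000 and treated as 10 seconds. */
        return 0;
    }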
@@ -1878,15 +1878,14 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
 		if (substream->wait_time) {
 			wait_time = substream->wait_time;
 		} else {
-			wait_time = 10;
+			wait_time = 100;
 
 			if (runtime->rate) {
-				long t = runtime->period_size * 2 /
-					 runtime->rate;
+				long t = runtime->buffer_size * 1100 / runtime->rate;
 				wait_time = max(t, wait_time);
 			}
-			wait_time = msecs_to_jiffies(wait_time * 1000);
 		}
+		wait_time = msecs_to_jiffies(wait_time);
 	}
 
 	for (;;) {
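The other half of the fix is visible in the hunk above: msecs_to_jiffies() now runs after the if/else, so a driver-supplied substream->wait_time (documented as msecs) is converted too, instead of being consumed raw as jiffies. A minimal userspace model of the unit mix-up, assuming HZ=250 (illustrative; the real HZ is a kernel config choice) and a simplified round-up conversion:

    #include <stdio.h>

    #define HZ 250  /* assumed for illustration; kernel HZ is config-dependent */

    /* simplified stand-in for the kernel's msecs_to_jiffies() helper */
    static unsigned long msecs_to_jiffies(unsigned int ms)
    {
        return ((unsigned long)ms * HZ + 999) / 1000;  /* round up */
    }

    int main(void)
    {
        unsigned int wait_time = 500;  /* soc/sof sets wait_time = 500, meaning msecs */

        /* Before the patch: the driver value bypassed conversion entirely,
         * so 500 was consumed as 500 jiffies. */
        printf("before: 500 jiffies -> %u ms\n", 500 * 1000 / HZ);  /* 2000 ms */

        /* After: both branches funnel through the conversion. */
        printf("after:  %lu jiffies -> %u ms\n",
               msecs_to_jiffies(wait_time), wait_time);             /* 125 jiffies */
        return 0;
    }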
@@ -1934,8 +1933,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
 		}
 		if (!tout) {
 			pcm_dbg(substream->pcm,
-				"%s write error (DMA or IRQ trouble?)\n",
-				is_playback ? "playback" : "capture");
+				"%s timeout (DMA or IRQ trouble?)\n",
+				is_playback ? "playback write" : "capture read");
 			err = -EIO;
 			break;
 		}
@@ -2159,12 +2159,12 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 		if (runtime->no_period_wakeup)
 			tout = MAX_SCHEDULE_TIMEOUT;
 		else {
-			tout = 10;
+			tout = 100;
 			if (runtime->rate) {
-				long t = runtime->period_size * 2 / runtime->rate;
+				long t = runtime->buffer_size * 1100 / runtime->rate;
 				tout = max(t, tout);
 			}
-			tout = msecs_to_jiffies(tout * 1000);
+			tout = msecs_to_jiffies(tout);
 		}
 		tout = schedule_timeout(tout);
 
@@ -2187,7 +2187,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 			result = -ESTRPIPE;
 		else {
 			dev_dbg(substream->pcm->card->dev,
-				"playback drain error (DMA or IRQ trouble?)\n");
+				"playback drain timeout (DMA or IRQ trouble?)\n");
 			snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
 			result = -EIO;
 		}