ALSA: firewire-lib: start processing content of packet at the same cycle in several IT contexts

DICE ASICs support several pairs of isochronous packet streaming and
expect software to queue packets with the same timing information into
the same isochronous cycle.

This commit adds a structure member to manage the cycle at which to start
processing packet content in several IT contexts. The cycle is decided when
a batch of isochronous cycles is skipped in the callback of the isochronous
context for the IRQ target.

Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
Link: https://lore.kernel.org/r/20210520040154.80450-8-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Iwai <tiwai@suse.de>
This commit is contained in:
Takashi Sakamoto 2021-05-20 13:01:53 +09:00 committed by Takashi Iwai
parent bd165079de
commit 9b1fcd9bf8
3 changed files with 156 additions and 32 deletions

View File

@ -894,14 +894,13 @@ static void process_ctx_payloads(struct amdtp_stream *s,
update_pcm_pointers(s, pcm, pcm_frames);
}
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header,
void *private_data)
static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
const struct amdtp_domain *d = s->domain;
const __be32 *ctx_header = header;
unsigned int events_per_period = d->events_per_period;
const unsigned int events_per_period = d->events_per_period;
unsigned int event_count = s->ctx_data.rx.event_count;
unsigned int pkt_header_length;
unsigned int packets;
@ -958,6 +957,89 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
s->ctx_data.rx.event_count = event_count;
}
// Discard a batch of completed cycles without processing their content:
// parse the OHCI IT context headers only to track the cycle count, then
// queue the same number of empty packets so the DMA program stays filled.
static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
const __be32 *ctx_header = header;
unsigned int packets;
unsigned int cycle;
int i;
// A negative packet index marks a stream already cancelled by an error.
if (s->packet_index < 0)
return;
// One 32-bit context-header entry per completed packet.
packets = header_length / sizeof(*ctx_header);
// Record the cycle following the last completed one; irq_target_callback_skip
// reads next_cycle to choose a common start cycle for all IT contexts.
cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
s->next_cycle = increment_ohci_cycle_count(cycle, 1);
for (i = 0; i < packets; ++i) {
// Empty packet: no CIP header, no payload.
struct fw_iso_packet params = {
.header_length = 0,
.payload_length = 0,
};
// Only the IRQ-target stream schedules a hardware IRQ, and only on
// the last packet of the batch.
bool sched_irq = (s == d->irq_target && i == packets - 1);
if (queue_out_packet(s, &params, sched_irq) < 0) {
cancel_stream(s);
return;
}
}
}
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data);
// Transitional RX callback: within one batch of completed packets, skip the
// cycles that precede the agreed start cycle (d->processing_cycle.rx_start),
// process the rest normally, then switch this context to its steady-state
// callback so subsequent batches are processed directly.
static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
__be32 *ctx_header = header;
const unsigned int queue_size = s->queue_size;
unsigned int packets;
unsigned int offset;
// A negative packet index marks a stream already cancelled by an error.
if (s->packet_index < 0)
return;
packets = header_length / sizeof(*ctx_header);
// Find the first packet whose cycle reaches the domain's RX start cycle.
offset = 0;
while (offset < packets) {
unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
break;
++offset;
}
// Packets before the start cycle are skipped (re-queued empty).
if (offset > 0) {
unsigned int length = sizeof(*ctx_header) * offset;
skip_rx_packets(context, tstamp, length, ctx_header, private_data);
if (amdtp_streaming_error(s))
return;
ctx_header += offset;
header_length -= length;
}
// The remaining packets are processed for real, and the context switches
// to its steady-state callback for the next completion.
if (offset < packets) {
process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
if (amdtp_streaming_error(s))
return;
if (s == d->irq_target)
s->context->callback.sc = irq_target_callback;
else
s->context->callback.sc = process_rx_packets;
}
}
static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
@ -1116,34 +1198,22 @@ static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
d->seq.tail = seq_tail;
}
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header,
void *private_data)
static void process_ctxs_in_domain(struct amdtp_domain *d)
{
struct amdtp_stream *irq_target = private_data;
struct amdtp_domain *d = irq_target->domain;
unsigned int packets = header_length / sizeof(__be32);
struct amdtp_stream *s;
// Record enough entries with extra 3 cycles at least.
pool_ideal_seq_descs(d, packets + 3);
out_stream_callback(context, tstamp, header_length, header, irq_target);
if (amdtp_streaming_error(irq_target))
goto error;
list_for_each_entry(s, &d->streams, list) {
if (s != irq_target && amdtp_stream_running(s)) {
if (s != d->irq_target && amdtp_stream_running(s))
fw_iso_context_flush_completions(s->context);
if (amdtp_streaming_error(s))
goto error;
}
if (amdtp_streaming_error(s))
goto error;
}
return;
error:
if (amdtp_stream_running(irq_target))
cancel_stream(irq_target);
if (amdtp_stream_running(d->irq_target))
cancel_stream(d->irq_target);
list_for_each_entry(s, &d->streams, list) {
if (amdtp_stream_running(s))
@ -1151,6 +1221,61 @@ error:
}
}
// Steady-state callback for the IRQ-target IT context: pool ideal sequence
// descriptors for the batch, process this context's packets, then drive the
// other contexts in the domain via process_ctxs_in_domain().
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
// One 32-bit context-header entry per completed packet.
unsigned int packets = header_length / sizeof(__be32);
pool_ideal_seq_descs(d, packets);
process_rx_packets(context, tstamp, header_length, header, private_data);
process_ctxs_in_domain(d);
}
// Transitional callback for the IRQ-target IT context: same flow as
// irq_target_callback(), but delegates to the intermediate RX processing so
// cycles before the agreed start cycle are skipped in this batch.
static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
// One 32-bit context-header entry per completed packet.
unsigned int packets = header_length / sizeof(__be32);
pool_ideal_seq_descs(d, packets);
process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
process_ctxs_in_domain(d);
}
// First-phase callback for the IRQ-target IT context: skip this batch, flush
// the peer contexts, then pick the cycle at which all IT contexts start
// processing packet content and move every OUT stream to its intermediate
// callback.
static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
unsigned int cycle;
skip_rx_packets(context, tstamp, header_length, header, private_data);
process_ctxs_in_domain(d);
// Decide the cycle count to begin processing content of packet in IT contexts. All of IT
// contexts are expected to start and get callback when reaching here.
// The latest next_cycle across all OUT streams wins, so every context
// has already queued (empty) packets up to the chosen cycle.
cycle = s->next_cycle;
// NOTE: 's' is reused as the loop cursor below; its original value is no
// longer needed once 'cycle' has been seeded.
list_for_each_entry(s, &d->streams, list) {
if (s->direction != AMDTP_OUT_STREAM)
continue;
if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
cycle = s->next_cycle;
if (s == d->irq_target)
s->context->callback.sc = irq_target_callback_intermediately;
else
s->context->callback.sc = process_rx_packets_intermediately;
}
d->processing_cycle.rx_start = cycle;
}
// this is executed one time.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
u32 tstamp, size_t header_length,
@ -1176,13 +1301,11 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
if (s == d->irq_target)
context->callback.sc = irq_target_callback;
context->callback.sc = irq_target_callback_skip;
else
context->callback.sc = out_stream_callback;
context->callback.sc = skip_rx_packets;
}
s->start_cycle = cycle;
context->callback.sc(context, tstamp, header_length, header, s);
// Decide the cycle count to begin processing content of packet in IR contexts.

View File

@ -170,7 +170,6 @@ struct amdtp_stream {
/* To wait for first packet. */
bool callbacked;
wait_queue_head_t callback_wait;
u32 start_cycle;
unsigned int next_cycle;
/* For backends to process data blocks. */
@ -291,6 +290,7 @@ struct amdtp_domain {
struct {
unsigned int tx_init_skip;
unsigned int tx_start;
unsigned int rx_start;
} processing_cycle;
struct {

View File

@ -377,8 +377,8 @@ static inline void compute_next_elapse_from_start(struct amdtp_motu *p)
p->next_seconds -= 128;
}
static void write_sph(struct amdtp_stream *s, __be32 *buffer,
unsigned int data_blocks)
static void write_sph(struct amdtp_stream *s, __be32 *buffer, unsigned int data_blocks,
const unsigned int rx_start_cycle)
{
struct amdtp_motu *p = s->protocol;
unsigned int next_cycles;
@ -386,7 +386,7 @@ static void write_sph(struct amdtp_stream *s, __be32 *buffer,
u32 sph;
for (i = 0; i < data_blocks; i++) {
next_cycles = (s->start_cycle + p->next_cycles) % 8000;
next_cycles = (rx_start_cycle + p->next_cycles) % 8000;
sph = ((next_cycles << 12) | p->next_ticks) & 0x01ffffff;
*buffer = cpu_to_be32(sph);
@ -401,6 +401,7 @@ static unsigned int process_it_ctx_payloads(struct amdtp_stream *s,
unsigned int packets,
struct snd_pcm_substream *pcm)
{
const unsigned int rx_start_cycle = s->domain->processing_cycle.rx_start;
struct amdtp_motu *p = s->protocol;
unsigned int pcm_frames = 0;
int i;
@ -423,7 +424,7 @@ static unsigned int process_it_ctx_payloads(struct amdtp_stream *s,
// TODO: how to interact control messages between userspace?
write_sph(s, buf, data_blocks);
write_sph(s, buf, data_blocks, rx_start_cycle);
}
// For tracepoints.