2019-05-27 14:55:05 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* ALSA sequencer Memory Manager
|
|
|
|
* Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
|
2007-10-15 15:50:19 +08:00
|
|
|
* Jaroslav Kysela <perex@perex.cz>
|
2005-04-17 06:20:36 +08:00
|
|
|
* 2000 by Takashi Iwai <tiwai@suse.de>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
2011-09-22 21:34:58 +08:00
|
|
|
#include <linux/export.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/slab.h>
|
2017-02-03 02:15:33 +08:00
|
|
|
#include <linux/sched/signal.h>
|
2019-03-28 23:09:45 +08:00
|
|
|
#include <linux/mm.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <sound/core.h>
|
|
|
|
|
|
|
|
#include <sound/seq_kernel.h>
|
|
|
|
#include "seq_memory.h"
|
|
|
|
#include "seq_queue.h"
|
|
|
|
#include "seq_info.h"
|
|
|
|
#include "seq_lock.h"
|
|
|
|
|
2005-11-17 21:04:02 +08:00
|
|
|
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
return pool->total_elements - atomic_read(&pool->counter);
|
|
|
|
}
|
|
|
|
|
2005-11-17 21:04:02 +08:00
|
|
|
static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
return snd_seq_pool_available(pool) >= pool->room;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Variable length event:
|
|
|
|
* The event like sysex uses variable length type.
|
|
|
|
* The external data may be stored in three different formats.
|
|
|
|
* 1) kernel space
|
|
|
|
* This is the normal case.
|
|
|
|
* ext.data.len = length
|
|
|
|
* ext.data.ptr = buffer pointer
|
|
|
|
* 2) user space
|
|
|
|
* When an event is generated via read(), the external data is
|
|
|
|
* kept in user space until expanded.
|
|
|
|
* ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
|
|
|
|
* ext.data.ptr = userspace pointer
|
|
|
|
* 3) chained cells
|
|
|
|
* When the variable length event is enqueued (in prioq or fifo),
|
|
|
|
* the external data is decomposed to several cells.
|
|
|
|
* ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
|
|
|
|
 * ext.data.ptr = the additional cell head
|
|
|
|
* -> cell.next -> cell.next -> ..
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* exported:
|
|
|
|
* call dump function to expand external data.
|
|
|
|
*/
|
|
|
|
|
2005-11-17 21:04:02 +08:00
|
|
|
static int get_var_len(const struct snd_seq_event *event)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
|
|
|
|
}
|
|
|
|
|
2023-05-23 15:53:39 +08:00
|
|
|
/*
 * Feed a variable-length event's external payload to @func in chunks.
 *
 * @event: event whose length type must be SNDRV_SEQ_EVENT_LENGTH_VARIABLE
 * @func: callback receiving (private_data, chunk pointer, chunk size)
 * @private_data: opaque cookie passed through to @func
 * @offset: number of leading payload bytes to skip
 * @maxlen: maximum number of bytes to deliver past @offset (0 = no limit)
 *
 * Handles the three storage formats of the external data (see the big
 * comment above): user-space pointer, contiguous kernel buffer, and
 * chained cells.  Returns 0 on success, or a negative error code
 * (-EINVAL from get_var_len() for a non-variable event, -EFAULT on a
 * faulting user copy, or the first negative value returned by @func).
 */
static int dump_var_event(const struct snd_seq_event *event,
			  snd_seq_dump_func_t func, void *private_data,
			  int offset, int maxlen)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	len = get_var_len(event);
	if (len <= 0)
		return len;
	if (len <= offset)
		return 0;	/* nothing left after the skipped prefix */
	if (maxlen && len > offset + maxlen)
		len = offset + maxlen;	/* clip to the requested window */

	/* format 2: payload still resides in user space */
	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		curptr += offset;
		len -= offset;
		/* bounce through a small stack buffer, 32 bytes at a time */
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	/* format 1: one contiguous kernel buffer -- a single callback */
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr + offset,
			    len - offset);

	/* format 3: payload decomposed into chained cells */
	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		char *curptr = (char *)&cell->event;

		/* skip whole cells that lie entirely before @offset */
		if (offset >= size) {
			offset -= size;
			len -= size;
			continue;
		}
		if (len < size)
			size = len;	/* last (partial) cell */
		err = func(private_data, curptr + offset, size - offset);
		if (err < 0)
			return err;
		offset = 0;	/* only the first visited cell is partial */
		len -= size;
	}
	return 0;
}
|
2023-05-23 15:53:39 +08:00
|
|
|
|
|
|
|
/**
 * snd_seq_dump_var_event - expand a variable-length event's payload
 * @event: the event with SNDRV_SEQ_EVENT_LENGTH_VARIABLE data
 * @func: callback invoked for each contiguous chunk of the payload
 * @private_data: opaque pointer passed through to @func
 *
 * Convenience wrapper around dump_var_event() that walks the whole
 * payload (offset 0, no length limit).
 *
 * Return: 0 on success or a negative error code from dump_var_event().
 */
int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	return dump_var_event(event, func, private_data, 0, 0);
}
EXPORT_SYMBOL(snd_seq_dump_var_event);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* exported:
|
|
|
|
* expand the variable length event to linear buffer space.
|
|
|
|
*/
|
|
|
|
|
2022-11-19 07:23:50 +08:00
|
|
|
/*
 * Chunk-copy callback used by expand_var_event() for a kernel-space
 * destination: append @size bytes at the buffer cursor held in @ptr
 * (a char**), then advance the cursor.  Always succeeds.
 */
static int seq_copy_in_kernel(void *ptr, void *src, int size)
{
	char **dstp = ptr;

	memcpy(*dstp, src, size);
	*dstp += size;
	return 0;
}
|
|
|
|
|
2022-11-19 07:23:50 +08:00
|
|
|
static int seq_copy_in_user(void *ptr, void *src, int size)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2022-11-19 07:23:50 +08:00
|
|
|
char __user **bufptr = ptr;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (copy_to_user(*bufptr, src, size))
|
|
|
|
return -EFAULT;
|
|
|
|
*bufptr += size;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-05-23 15:53:39 +08:00
|
|
|
/*
 * Copy @size bytes of the event's external payload, starting at @offset,
 * into the linear buffer @buf.
 *
 * A user-space payload (SNDRV_SEQ_EXT_USRPTR) is copied directly with
 * copy_from_user(); that is only permitted when the destination is a
 * kernel buffer (@in_kernel), hence the -EINVAL check.  All other
 * formats are delegated to dump_var_event() with the chunk-copy helper
 * matching the destination address space.
 *
 * Returns 0 on success or a negative error code.
 */
static int expand_var_event(const struct snd_seq_event *event,
			    int offset, int size, char *buf, bool in_kernel)
{
	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;	/* user-to-user copy unsupported */
		if (copy_from_user(buf,
				   (char __force __user *)event->data.ext.ptr + offset,
				   size))
			return -EFAULT;
		return 0;
	}
	return dump_var_event(event,
			      in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
			      &buf, offset, size);
}
|
|
|
|
|
2005-11-17 21:04:02 +08:00
|
|
|
/**
 * snd_seq_expand_var_event - expand a variable-length event into a buffer
 * @event: source event with variable-length external data
 * @count: size of @buf in bytes
 * @buf: destination buffer
 * @in_kernel: nonzero if @buf is a kernel buffer, 0 for a user buffer
 * @size_aligned: if > 0, round the result length up to this alignment
 *	and zero-fill the padding
 *
 * Return: the (possibly padded) number of bytes written, -EAGAIN when
 * @buf is too small for the aligned length, or another negative error
 * code from get_var_len()/expand_var_event().
 *
 * NOTE(review): the zero padding uses a plain memset() on @buf even
 * when !in_kernel -- presumably callers only pass size_aligned together
 * with kernel buffers; verify against the call sites.
 */
int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen, err;

	len = get_var_len(event);
	if (len < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;
	err = expand_var_event(event, 0, len, buf, in_kernel);
	if (err < 0)
		return err;
	if (len != newlen)
		memset(buf + len, 0, newlen - len);	/* clear the pad */
	return newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2023-05-23 15:53:39 +08:00
|
|
|
/**
 * snd_seq_expand_var_event_at - expand the payload starting at an offset
 * @event: source event with variable-length external data
 * @count: size of @buf in bytes
 * @buf: destination (kernel) buffer
 * @offset: byte offset within the payload to start expanding from
 *
 * Return: the number of payload bytes delivered (clamped to @count),
 * 0 when @offset is at or past the end, or a negative error code.
 *
 * NOTE(review): expand_var_event() is called with @count (the buffer
 * size) rather than the clamped remaining length; for a user-space
 * payload this may copy beyond the logical end of the event data
 * (still bounded by @buf) -- confirm this is intended.
 */
int snd_seq_expand_var_event_at(const struct snd_seq_event *event, int count,
				char *buf, int offset)
{
	int len, err;

	len = get_var_len(event);
	if (len < 0)
		return len;
	if (len <= offset)
		return 0;
	len -= offset;
	if (len > count)
		len = count;	/* report at most the buffer size */
	err = expand_var_event(event, offset, count, buf, true);
	if (err < 0)
		return err;
	return len;
}
EXPORT_SYMBOL_GPL(snd_seq_expand_var_event_at);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* release this cell, free extended data if available
|
|
|
|
*/
|
|
|
|
|
2005-11-17 21:04:02 +08:00
|
|
|
/*
 * Link @cell back onto @pool's free list and drop the in-use counter.
 * Must be called with pool->lock held (see snd_seq_cell_free()).
 */
static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}
|
|
|
|
|
2005-11-17 21:04:02 +08:00
|
|
|
void snd_seq_cell_free(struct snd_seq_event_cell * cell)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
2005-11-17 21:04:02 +08:00
|
|
|
struct snd_seq_pool *pool;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-08-08 23:09:09 +08:00
|
|
|
if (snd_BUG_ON(!cell))
|
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
pool = cell->pool;
|
2008-08-08 23:09:09 +08:00
|
|
|
if (snd_BUG_ON(!pool))
|
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
spin_lock_irqsave(&pool->lock, flags);
|
|
|
|
free_cell(pool, cell);
|
|
|
|
if (snd_seq_ev_is_variable(&cell->event)) {
|
|
|
|
if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
|
2005-11-17 21:04:02 +08:00
|
|
|
struct snd_seq_event_cell *curp, *nextptr;
|
2005-04-17 06:20:36 +08:00
|
|
|
curp = cell->event.data.ext.ptr;
|
|
|
|
for (; curp; curp = nextptr) {
|
|
|
|
nextptr = curp->next;
|
|
|
|
curp->next = pool->free;
|
|
|
|
free_cell(pool, curp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (waitqueue_active(&pool->output_sleep)) {
|
|
|
|
/* has enough space now? */
|
|
|
|
if (snd_seq_output_ok(pool))
|
|
|
|
wake_up(&pool->output_sleep);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&pool->lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* allocate an event cell.
|
|
|
|
*/
|
2005-11-17 21:04:02 +08:00
|
|
|
/*
 * Grab one free cell from the pool.
 *
 * @pool: event pool to allocate from
 * @cellp: filled with the allocated cell (NULL on failure)
 * @nonblock: if nonzero, fail with -EAGAIN instead of sleeping when the
 *	pool is exhausted
 * @file: not used in this function -- presumably kept for the call-site
 *	signature; TODO confirm
 * @mutexp: optional mutex held by the caller; temporarily released
 *	around schedule() so other clients can make progress
 *
 * In blocking mode the caller sleeps on pool->output_sleep until a cell
 * is freed (snd_seq_cell_free() wakes the queue), the pool starts
 * closing (-ENOMEM), or a signal arrives (-ERESTARTSYS).
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file,
			      struct mutex *mutexp)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_entry_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		pr_debug("ALSA: seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		/* drop the lock (and the caller's mutex) while sleeping */
		spin_unlock_irqrestore(&pool->lock, flags);
		if (mutexp)
			mutex_unlock(mutexp);
		schedule();
		if (mutexp)
			mutex_lock(mutexp);
		spin_lock_irqsave(&pool->lock, flags);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;	/* track high-water mark */
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* duplicate the event to a cell.
|
|
|
|
* if the event has external data, the data is decomposed to additional
|
|
|
|
* cells.
|
|
|
|
*/
|
2005-11-17 21:04:02 +08:00
|
|
|
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file, struct mutex *mutexp)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;
	int size;

	*cellp = NULL;

	/* how many extra cells does the variable-length payload need? */
	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = DIV_ROUND_UP(extlen, sizeof(struct snd_seq_event));
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;	/* could never fit, even in an empty pool */

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
	if (err < 0)
		return err;

	/* copy the event; UMP packets may be larger than snd_seq_event */
	size = snd_seq_event_packet_size(event);
	memcpy(&cell->ump, event, size);
#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
	if (size < sizeof(cell->event))
		cell->ump.raw.extra = 0;	/* clear the unused tail word */
#endif

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		/* the duplicate always stores its payload as a cell chain */
		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;	/* final partial chunk */
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
						 mutexp);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;	/* chain head */
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				/* source payload is itself a cell chain */
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	/* releases the whole chain built so far as well */
	snd_seq_cell_free(cell);
	return err;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* poll wait */
|
2005-11-17 21:04:02 +08:00
|
|
|
/*
 * Register the caller on the pool's wait queue for poll()/select() and
 * report whether output is currently possible (enough free cells).
 */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* allocate room specified number of events */
|
2005-11-17 21:04:02 +08:00
|
|
|
int snd_seq_pool_init(struct snd_seq_pool *pool)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int cell;
|
2005-11-17 21:04:02 +08:00
|
|
|
struct snd_seq_event_cell *cellptr;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-08-08 23:09:09 +08:00
|
|
|
if (snd_BUG_ON(!pool))
|
|
|
|
return -EINVAL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-03-28 23:09:45 +08:00
|
|
|
cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size,
|
|
|
|
GFP_KERNEL);
|
2016-02-15 23:20:24 +08:00
|
|
|
if (!cellptr)
|
2005-04-17 06:20:36 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
/* add new cells to the free cell list */
|
2019-03-28 23:21:01 +08:00
|
|
|
spin_lock_irq(&pool->lock);
|
2016-02-15 23:20:24 +08:00
|
|
|
if (pool->ptr) {
|
2019-03-28 23:21:01 +08:00
|
|
|
spin_unlock_irq(&pool->lock);
|
2019-03-28 23:09:45 +08:00
|
|
|
kvfree(cellptr);
|
2016-02-15 23:20:24 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
pool->ptr = cellptr;
|
2005-04-17 06:20:36 +08:00
|
|
|
pool->free = NULL;
|
|
|
|
|
|
|
|
for (cell = 0; cell < pool->size; cell++) {
|
|
|
|
cellptr = pool->ptr + cell;
|
|
|
|
cellptr->pool = pool;
|
|
|
|
cellptr->next = pool->free;
|
|
|
|
pool->free = cellptr;
|
|
|
|
}
|
|
|
|
pool->room = (pool->size + 1) / 2;
|
|
|
|
|
|
|
|
/* init statistics */
|
|
|
|
pool->max_used = 0;
|
|
|
|
pool->total_elements = pool->size;
|
2019-03-28 23:21:01 +08:00
|
|
|
spin_unlock_irq(&pool->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-21 20:56:04 +08:00
|
|
|
/* refuse the further insertion to the pool */
|
|
|
|
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
{
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return;
	spin_lock_irqsave(&pool->lock, flags);
	/* snd_seq_cell_alloc() checks this flag: it stops sleeping for a
	 * free cell and fails with -ENOMEM once the pool is closing */
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* remove events */
|
2005-11-17 21:04:02 +08:00
|
|
|
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	struct snd_seq_event_cell *ptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wait for closing all threads */
	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	/* poll (one tick at a time) until every cell has been returned;
	 * callers such as snd_seq_pool_delete() mark the pool closing
	 * beforehand so no new allocation can succeed meanwhile */
	while (atomic_read(&pool->counter) > 0)
		schedule_timeout_uninterruptible(1);

	/* release all resources */
	spin_lock_irq(&pool->lock);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irq(&pool->lock);

	/* free outside the spinlock; kvfree() may handle a vmalloc area */
	kvfree(ptr);

	spin_lock_irq(&pool->lock);
	pool->closing = 0;	/* allow the pool to be re-initialized */
	spin_unlock_irq(&pool->lock);

	return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* init new memory pool */
|
2005-11-17 21:04:02 +08:00
|
|
|
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-11-17 21:04:02 +08:00
|
|
|
struct snd_seq_pool *pool;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* create pool block */
|
2005-09-09 20:20:49 +08:00
|
|
|
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
|
2015-03-10 22:41:18 +08:00
|
|
|
if (!pool)
|
2005-04-17 06:20:36 +08:00
|
|
|
return NULL;
|
|
|
|
spin_lock_init(&pool->lock);
|
|
|
|
pool->ptr = NULL;
|
|
|
|
pool->free = NULL;
|
|
|
|
pool->total_elements = 0;
|
|
|
|
atomic_set(&pool->counter, 0);
|
|
|
|
pool->closing = 0;
|
|
|
|
init_waitqueue_head(&pool->output_sleep);
|
|
|
|
|
|
|
|
pool->size = poolsize;
|
|
|
|
|
|
|
|
/* init statistics */
|
|
|
|
pool->max_used = 0;
|
|
|
|
return pool;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* remove memory pool */
|
2005-11-17 21:04:02 +08:00
|
|
|
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-11-17 21:04:02 +08:00
|
|
|
struct snd_seq_pool *pool = *ppool;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
*ppool = NULL;
|
|
|
|
if (pool == NULL)
|
|
|
|
return 0;
|
2017-03-21 20:56:04 +08:00
|
|
|
snd_seq_pool_mark_closing(pool);
|
2005-04-17 06:20:36 +08:00
|
|
|
snd_seq_pool_done(pool);
|
|
|
|
kfree(pool);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* exported to seq_clientmgr.c */
|
2005-11-17 21:04:02 +08:00
|
|
|
/*
 * Print the pool size and usage statistics into a proc info buffer,
 * prefixing every line with @space for indentation.
 */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;	/* pool not created yet: print nothing */
	snd_iprintf(buffer, "%sPool size : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures : %d\n", space, pool->event_alloc_failures);
}
|