um: race fix: initialize delayed_work *before* registering IRQ
... since chan_interrupt() might schedule it if there's too much incoming
data.  Kill task argument of chan_interrupt(), while we are at it - it's
always &line->task.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Richard Weinberger <richard@nod.at>
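The ordering bug being fixed is generic to any driver whose interrupt handler may schedule a delayed_work: if the IRQ is requested first, the handler can fire and call schedule_delayed_work() on a work item that has not yet been through INIT_DELAYED_WORK(). The following is a minimal sketch of that pattern, using hypothetical names (my_dev, my_irq_handler, my_work_fn) rather than the UML line driver's own; it only illustrates the initialization order the patch establishes.

/*
 * Sketch only - not the UML driver code.  Shows why the delayed_work
 * must be initialized before the IRQ that may schedule it is registered.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {				/* hypothetical device */
	int irq;
	struct delayed_work task;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, task.work);

	/* drain whatever input the IRQ handler could not process */
	(void)dev;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	/*
	 * This can run as soon as request_irq() returns, so dev->task
	 * must already be initialized by then.
	 */
	schedule_delayed_work(&dev->task, 1);
	return IRQ_HANDLED;
}

static int my_dev_setup(struct my_dev *dev)
{
	/* correct order: initialize the work item first ... */
	INIT_DELAYED_WORK(&dev->task, my_work_fn);

	/* ... then register the IRQ that may schedule it */
	return request_irq(dev->irq, my_irq_handler, 0, "my_dev", dev);
}

In the patch itself the same ordering is obtained by moving the INIT_DELAYED_WORK() call from line_open() to the top of enable_chan(), ahead of the loop that opens the channels, with line_timer_cb() moving along with it so it can be referenced there.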
parent 5eaa3411a8
commit 0fcd719934
@@ -27,7 +27,7 @@ struct chan {
 	void *data;
 };
 
-extern void chan_interrupt(struct line *line, struct delayed_work *task,
+extern void chan_interrupt(struct line *line,
 			   struct tty_struct *tty, int irq);
 extern int parse_chan_pair(char *str, struct line *line, int device,
 			   const struct chan_opts *opts, char **error_out);
@@ -146,12 +146,22 @@ void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
 		register_winch(chan->fd, tty);
 }
 
+static void line_timer_cb(struct work_struct *work)
+{
+	struct line *line = container_of(work, struct line, task.work);
+
+	if (!line->throttled)
+		chan_interrupt(line, line->tty, line->driver->read_irq);
+}
+
 int enable_chan(struct line *line)
 {
 	struct list_head *ele;
 	struct chan *chan;
 	int err;
 
+	INIT_DELAYED_WORK(&line->task, line_timer_cb);
+
 	list_for_each(ele, &line->chan_list) {
 		chan = list_entry(ele, struct chan, list);
 		err = open_one_chan(chan);
@@ -552,8 +562,7 @@ int parse_chan_pair(char *str, struct line *line, int device,
 	return 0;
 }
 
-void chan_interrupt(struct line *line, struct delayed_work *task,
-		    struct tty_struct *tty, int irq)
+void chan_interrupt(struct line *line, struct tty_struct *tty, int irq)
 {
 	struct chan *chan = line->chan_in;
 	int err;
@@ -564,7 +573,7 @@ void chan_interrupt(struct line *line, struct delayed_work *task,
 
 	do {
 		if (tty && !tty_buffer_request_room(tty, 1)) {
-			schedule_delayed_work(task, 1);
+			schedule_delayed_work(&line->task, 1);
 			goto out;
 		}
 		err = chan->ops->read(chan->fd, &c, chan->data);
@@ -21,19 +21,10 @@ static irqreturn_t line_interrupt(int irq, void *data)
 	struct line *line = chan->line;
 
 	if (line)
-		chan_interrupt(line, &line->task, line->tty, irq);
+		chan_interrupt(line, line->tty, irq);
 	return IRQ_HANDLED;
 }
 
-static void line_timer_cb(struct work_struct *work)
-{
-	struct line *line = container_of(work, struct line, task.work);
-
-	if (!line->throttled)
-		chan_interrupt(line, &line->task, line->tty,
-			       line->driver->read_irq);
-}
-
 /*
  * Returns the free space inside the ring buffer of this line.
  *
@@ -327,8 +318,7 @@ void line_unthrottle(struct tty_struct *tty)
 	struct line *line = tty->driver_data;
 
 	line->throttled = 0;
-	chan_interrupt(line, &line->task, tty,
-		       line->driver->read_irq);
+	chan_interrupt(line, tty, line->driver->read_irq);
 
 	/*
 	 * Maybe there is enough stuff pending that calling the interrupt
@@ -424,8 +414,6 @@ int line_open(struct line *lines, struct tty_struct *tty)
 	if (err)		/* line_close() will be called by our caller */
 		goto out_unlock;
 
-	INIT_DELAYED_WORK(&line->task, line_timer_cb);
-
 	if (!line->sigio) {
 		chan_enable_winch(line->chan_out, tty);
 		line->sigio = 1;