Merge branch 'gadget' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull gadgetfs fixes from Al Viro:
 "Assorted fixes around AIO on gadgetfs: leaks, use-after-free, troubles
  caused by ->f_op flipping"

* 'gadget' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  gadgetfs: really get rid of switching ->f_op
  gadgetfs: get rid of flipping ->f_op in ep_config()
  gadget: switch ep_io_operations to ->read_iter/->write_iter
  gadgetfs: use-after-free in ->aio_read()
  gadget/function/f_fs.c: switch to ->{read,write}_iter()
  gadget/function/f_fs.c: use put iov_iter into io_data
  gadget/function/f_fs.c: close leaks
  move iov_iter.c from mm/ to lib/
  new helper: dup_iter()
commit f788baadbd
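The recurring pattern in the diff below is the move from ->read/->write/->aio_read/->aio_write to the iov_iter based ->read_iter/->write_iter hooks. As a rough, hypothetical sketch (names such as demo_read_iter and demo_fops are placeholders, not code from this merge), a character-device style conversion on a kernel of this vintage looks roughly like this:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uio.h>

/* Hypothetical example, not part of this merge: a minimal pair of
 * ->read_iter/->write_iter methods plus the file_operations wiring
 * used on 3.19/4.0-era kernels. */
static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	char msg[] = "hello from the demo device\n";
	size_t n = min(iov_iter_count(to), strlen(msg));

	/* copy_to_iter() returns how many bytes it actually copied */
	return copy_to_iter(msg, n, to);
}

static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	char scratch[64];
	size_t n = min(iov_iter_count(from), sizeof(scratch));

	if (copy_from_iter(scratch, n, from) != n)
		return -EFAULT;
	return n;		/* consumed n bytes */
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= new_sync_read,	/* read(2) -> demo_read_iter */
	.write		= new_sync_write,	/* write(2) -> demo_write_iter */
	.read_iter	= demo_read_iter,
	.write_iter	= demo_write_iter,
};

With this shape, plain read(2)/write(2) still work because new_sync_read()/new_sync_write() build a one-segment iov_iter and call the *_iter methods.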
drivers/usb/gadget/function/f_fs.c

@@ -144,10 +144,9 @@ struct ffs_io_data {
bool read;

struct kiocb *kiocb;
const struct iovec *iovec;
unsigned long nr_segs;
char __user *buf;
size_t len;
struct iov_iter data;
const void *to_free;
char *buf;

struct mm_struct *mm;
struct work_struct work;

@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->req->actual;

if (io_data->read && ret > 0) {
int i;
size_t pos = 0;

/*
* Since req->length may be bigger than io_data->len (after
* being rounded up to maxpacketsize), we may end up with more
* data then user space has space for.
*/
ret = min_t(int, ret, io_data->len);

use_mm(io_data->mm);
for (i = 0; i < io_data->nr_segs; i++) {
size_t len = min_t(size_t, ret - pos,
io_data->iovec[i].iov_len);
if (!len)
break;
if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
&io_data->buf[pos], len))) {
ret = -EFAULT;
break;
}
pos += len;
}
ret = copy_to_iter(io_data->buf, ret, &io_data->data);
if (iov_iter_count(&io_data->data))
ret = -EFAULT;
unuse_mm(io_data->mm);
}

@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work)

io_data->kiocb->private = NULL;
if (io_data->read)
kfree(io_data->iovec);
kfree(io_data->to_free);
kfree(io_data->buf);
kfree(io_data);
}

@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* before the waiting completes, so do not assign to 'gadget' earlier
*/
struct usb_gadget *gadget = epfile->ffs->gadget;
size_t copied;

spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */

@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
return -ESHUTDOWN;
}
data_len = iov_iter_count(&io_data->data);
/*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
data_len = io_data->read ?
usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
io_data->len;
if (io_data->read)
data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
spin_unlock_irq(&epfile->ffs->eps_lock);

data = kmalloc(data_len, GFP_KERNEL);
if (unlikely(!data))
return -ENOMEM;
if (io_data->aio && !io_data->read) {
int i;
size_t pos = 0;
for (i = 0; i < io_data->nr_segs; i++) {
if (unlikely(copy_from_user(&data[pos],
io_data->iovec[i].iov_base,
io_data->iovec[i].iov_len))) {
ret = -EFAULT;
goto error;
}
pos += io_data->iovec[i].iov_len;
}
} else {
if (!io_data->read &&
unlikely(__copy_from_user(data, io_data->buf,
io_data->len))) {
if (!io_data->read) {
copied = copy_from_iter(data, data_len, &io_data->data);
if (copied != data_len) {
ret = -EFAULT;
goto error;
}

@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
*/
ret = ep->status;
if (io_data->read && ret > 0) {
ret = min_t(size_t, ret, io_data->len);

if (unlikely(copy_to_user(io_data->buf,
data, ret)))
ret = copy_to_iter(data, ret, &io_data->data);
if (unlikely(iov_iter_count(&io_data->data)))
ret = -EFAULT;
}
}

@@ -898,37 +864,6 @@ error:
return ret;
}

static ssize_t
ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
loff_t *ptr)
{
struct ffs_io_data io_data;

ENTER();

io_data.aio = false;
io_data.read = false;
io_data.buf = (char * __user)buf;
io_data.len = len;

return ffs_epfile_io(file, &io_data);
}

static ssize_t
ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
{
struct ffs_io_data io_data;

ENTER();

io_data.aio = false;
io_data.read = true;
io_data.buf = buf;
io_data.len = len;

return ffs_epfile_io(file, &io_data);
}

static int
ffs_epfile_open(struct inode *inode, struct file *file)
{
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
return value;
}

static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
const struct iovec *iovec,
unsigned long nr_segs, loff_t loff)
static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct ffs_io_data *io_data;
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;

ENTER();

io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
if (unlikely(!io_data))
return -ENOMEM;

io_data->aio = true;
io_data->read = false;
io_data->kiocb = kiocb;
io_data->iovec = iovec;
io_data->nr_segs = nr_segs;
io_data->len = kiocb->ki_nbytes;
io_data->mm = current->mm;

kiocb->private = io_data;

kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);

return ffs_epfile_io(kiocb->ki_filp, io_data);
}

static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
const struct iovec *iovec,
unsigned long nr_segs, loff_t loff)
{
struct ffs_io_data *io_data;
struct iovec *iovec_copy;

ENTER();

iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
if (unlikely(!iovec_copy))
return -ENOMEM;

memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);

io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
if (unlikely(!io_data)) {
kfree(iovec_copy);
return -ENOMEM;
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
return -ENOMEM;
p->aio = true;
} else {
p->aio = false;
}

io_data->aio = true;
io_data->read = true;
io_data->kiocb = kiocb;
io_data->iovec = iovec_copy;
io_data->nr_segs = nr_segs;
io_data->len = kiocb->ki_nbytes;
io_data->mm = current->mm;
p->read = false;
p->kiocb = kiocb;
p->data = *from;
p->mm = current->mm;

kiocb->private = io_data;
kiocb->private = p;

kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);

return ffs_epfile_io(kiocb->ki_filp, io_data);
res = ffs_epfile_io(kiocb->ki_filp, p);
if (res == -EIOCBQUEUED)
return res;
if (p->aio)
kfree(p);
else
*from = p->data;
return res;
}

static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;

ENTER();

if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
return -ENOMEM;
p->aio = true;
} else {
p->aio = false;
}

p->read = true;
p->kiocb = kiocb;
if (p->aio) {
p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
if (!p->to_free) {
kfree(p);
return -ENOMEM;
}
} else {
p->data = *to;
p->to_free = NULL;
}
p->mm = current->mm;

kiocb->private = p;

kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);

res = ffs_epfile_io(kiocb->ki_filp, p);
if (res == -EIOCBQUEUED)
return res;

if (p->aio) {
kfree(p->to_free);
kfree(p);
} else {
*to = p->data;
}
return res;
}

static int

@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = {
.llseek = no_llseek,

.open = ffs_epfile_open,
.write = ffs_epfile_write,
.read = ffs_epfile_read,
.aio_write = ffs_epfile_aio_write,
.aio_read = ffs_epfile_aio_read,
.write = new_sync_write,
.read = new_sync_read,
.write_iter = ffs_epfile_write_iter,
.read_iter = ffs_epfile_read_iter,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
};
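The ffs_epfile_read_iter()/ffs_epfile_write_iter() hunks above share one ownership rule: a synchronous kiocb keeps its ffs_io_data on the stack and uses the caller's iov_iter in place, while an AIO kiocb gets a kmalloc'ed copy plus a dup_iter()'ed iterator so that everything survives after the submitting syscall returns; a return of -EIOCBQUEUED means the completion side now owns that state. A reduced, hypothetical sketch of that split (demo_io_data and demo_queue are stand-ins, not kernel APIs, and the actual queueing is stubbed out):

#include <linux/aio.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Hypothetical sketch, not code from the patch: the sync-vs-AIO
 * ownership split used by the read_iter path above. */
struct demo_io_data {
	bool		aio;
	struct kiocb	*kiocb;
	struct iov_iter	data;
	const void	*to_free;	/* segment array duplicated for AIO */
};

/* Stand-in for the real submission path; pretend the request is
 * always queued to the hardware. */
static ssize_t demo_queue(struct demo_io_data *p)
{
	return -EIOCBQUEUED;
}

static ssize_t demo_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
	struct demo_io_data io_data, *p = &io_data;
	ssize_t res;

	if (!is_sync_kiocb(kiocb)) {
		/* AIO: state must outlive this function */
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
		p->aio = true;
		p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
		if (!p->to_free) {
			kfree(p);
			return -ENOMEM;
		}
	} else {
		/* synchronous: stack state and the caller's iter are fine */
		p->aio = false;
		p->data = *to;
		p->to_free = NULL;
	}
	p->kiocb = kiocb;

	res = demo_queue(p);
	if (res == -EIOCBQUEUED)
		return res;	/* completion callback now owns (and frees) *p */

	if (p->aio) {
		kfree(p->to_free);
		kfree(p);
	} else {
		*to = p->data;	/* report how far the iterator advanced */
	}
	return res;
}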
drivers/usb/gadget/legacy/inode.c

@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");

static int ep_open(struct inode *, struct file *);


/*----------------------------------------------------------------------*/


@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req)
* still need dev->lock to use epdata->ep.
*/
static int
get_ready_ep (unsigned f_flags, struct ep_data *epdata)
get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
{
int val;

if (f_flags & O_NONBLOCK) {
if (!mutex_trylock(&epdata->lock))
goto nonblock;
if (epdata->state != STATE_EP_ENABLED) {
if (epdata->state != STATE_EP_ENABLED &&
(!is_write || epdata->state != STATE_EP_READY)) {
mutex_unlock(&epdata->lock);
nonblock:
val = -EAGAIN;

@@ -305,18 +308,20 @@ nonblock:

switch (epdata->state) {
case STATE_EP_ENABLED:
return 0;
case STATE_EP_READY: /* not configured yet */
if (is_write)
return 0;
// FALLTHRU
case STATE_EP_UNBOUND: /* clean disconnect */
break;
// case STATE_EP_DISABLED: /* "can't happen" */
// case STATE_EP_READY: /* "can't happen" */
default: /* error! */
pr_debug ("%s: ep %p not available, state %d\n",
shortname, epdata, epdata->state);
// FALLTHROUGH
case STATE_EP_UNBOUND: /* clean disconnect */
val = -ENODEV;
mutex_unlock(&epdata->lock);
}
return val;
mutex_unlock(&epdata->lock);
return -ENODEV;
}

static ssize_t

@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
return value;
}


/* handle a synchronous OUT bulk/intr/iso transfer */
static ssize_t
ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
struct ep_data *data = fd->private_data;
void *kbuf;
ssize_t value;

if ((value = get_ready_ep (fd->f_flags, data)) < 0)
return value;

/* halt any endpoint by doing a "wrong direction" i/o call */
if (usb_endpoint_dir_in(&data->desc)) {
if (usb_endpoint_xfer_isoc(&data->desc)) {
mutex_unlock(&data->lock);
return -EINVAL;
}
DBG (data->dev, "%s halt\n", data->name);
spin_lock_irq (&data->dev->lock);
if (likely (data->ep != NULL))
usb_ep_set_halt (data->ep);
spin_unlock_irq (&data->dev->lock);
mutex_unlock(&data->lock);
return -EBADMSG;
}

/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */

value = -ENOMEM;
kbuf = kmalloc (len, GFP_KERNEL);
if (unlikely (!kbuf))
goto free1;

value = ep_io (data, kbuf, len);
VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
data->name, len, (int) value);
if (value >= 0 && copy_to_user (buf, kbuf, value))
value = -EFAULT;

free1:
mutex_unlock(&data->lock);
kfree (kbuf);
return value;
}

/* handle a synchronous IN bulk/intr/iso transfer */
static ssize_t
ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
struct ep_data *data = fd->private_data;
void *kbuf;
ssize_t value;

if ((value = get_ready_ep (fd->f_flags, data)) < 0)
return value;

/* halt any endpoint by doing a "wrong direction" i/o call */
if (!usb_endpoint_dir_in(&data->desc)) {
if (usb_endpoint_xfer_isoc(&data->desc)) {
mutex_unlock(&data->lock);
return -EINVAL;
}
DBG (data->dev, "%s halt\n", data->name);
spin_lock_irq (&data->dev->lock);
if (likely (data->ep != NULL))
usb_ep_set_halt (data->ep);
spin_unlock_irq (&data->dev->lock);
mutex_unlock(&data->lock);
return -EBADMSG;
}

/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */

value = -ENOMEM;
kbuf = memdup_user(buf, len);
if (IS_ERR(kbuf)) {
value = PTR_ERR(kbuf);
kbuf = NULL;
goto free1;
}

value = ep_io (data, kbuf, len);
VDEBUG (data->dev, "%s write %zu IN, status %d\n",
data->name, len, (int) value);
free1:
mutex_unlock(&data->lock);
kfree (kbuf);
return value;
}

static int
ep_release (struct inode *inode, struct file *fd)
{

@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
struct ep_data *data = fd->private_data;
int status;

if ((status = get_ready_ep (fd->f_flags, data)) < 0)
if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
return status;

spin_lock_irq (&data->dev->lock);

@@ -517,8 +431,8 @@ struct kiocb_priv {
struct mm_struct *mm;
struct work_struct work;
void *buf;
const struct iovec *iv;
unsigned long nr_segs;
struct iov_iter to;
const void *to_free;
unsigned actual;
};


@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb)
return value;
}

static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
{
ssize_t len, total;
void *to_copy;
int i;

/* copy stuff into user buffers */
total = priv->actual;
len = 0;
to_copy = priv->buf;
for (i=0; i < priv->nr_segs; i++) {
ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);

if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
if (len == 0)
len = -EFAULT;
break;
}

total -= this;
len += this;
to_copy += this;
if (total == 0)
break;
}

return len;
}

static void ep_user_copy_worker(struct work_struct *work)
{
struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work)
size_t ret;

use_mm(mm);
ret = ep_copy_to_user(priv);
ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
unuse_mm(mm);
if (!ret)
ret = -EFAULT;

/* completing the iocb can drop the ctx and mm, don't touch mm after */
aio_complete(iocb, ret, ret);

kfree(priv->buf);
kfree(priv->to_free);
kfree(priv);
}


@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
* don't need to copy anything to userspace, so we can
* complete the aio request immediately.
*/
if (priv->iv == NULL || unlikely(req->actual == 0)) {
if (priv->to_free == NULL || unlikely(req->actual == 0)) {
kfree(req->buf);
kfree(priv->to_free);
kfree(priv);
iocb->private = NULL;
/* aio_complete() reports bytes-transferred _and_ faults */

@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)

priv->buf = req->buf;
priv->actual = req->actual;
INIT_WORK(&priv->work, ep_user_copy_worker);
schedule_work(&priv->work);
}
spin_unlock(&epdata->dev->lock);

@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
put_ep(epdata);
}

static ssize_t
ep_aio_rwtail(
struct kiocb *iocb,
char *buf,
size_t len,
struct ep_data *epdata,
const struct iovec *iv,
unsigned long nr_segs
)
static ssize_t ep_aio(struct kiocb *iocb,
struct kiocb_priv *priv,
struct ep_data *epdata,
char *buf,
size_t len)
{
struct kiocb_priv *priv;
struct usb_request *req;
ssize_t value;
struct usb_request *req;
ssize_t value;

priv = kmalloc(sizeof *priv, GFP_KERNEL);
if (!priv) {
value = -ENOMEM;
fail:
kfree(buf);
return value;
}
iocb->private = priv;
priv->iocb = iocb;
priv->iv = iv;
priv->nr_segs = nr_segs;
INIT_WORK(&priv->work, ep_user_copy_worker);

value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
if (unlikely(value < 0)) {
kfree(priv);
goto fail;
}

kiocb_set_cancel_fn(iocb, ep_aio_cancel);
get_ep(epdata);

@@ -669,75 +538,154 @@ fail:
* allocate or submit those if the host disconnected.
*/
spin_lock_irq(&epdata->dev->lock);
if (likely(epdata->ep)) {
req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
if (likely(req)) {
priv->req = req;
req->buf = buf;
req->length = len;
req->complete = ep_aio_complete;
req->context = iocb;
value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
if (unlikely(0 != value))
usb_ep_free_request(epdata->ep, req);
} else
value = -EAGAIN;
} else
value = -ENODEV;
value = -ENODEV;
if (unlikely(epdata->ep))
goto fail;

req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
value = -ENOMEM;
if (unlikely(!req))
goto fail;

priv->req = req;
req->buf = buf;
req->length = len;
req->complete = ep_aio_complete;
req->context = iocb;
value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
if (unlikely(0 != value)) {
usb_ep_free_request(epdata->ep, req);
goto fail;
}
spin_unlock_irq(&epdata->dev->lock);
return -EIOCBQUEUED;

mutex_unlock(&epdata->lock);

if (unlikely(value)) {
kfree(priv);
put_ep(epdata);
} else
value = -EIOCBQUEUED;
fail:
spin_unlock_irq(&epdata->dev->lock);
kfree(priv->to_free);
kfree(priv);
put_ep(epdata);
return value;
}

static ssize_t
ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t o)
ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct ep_data *epdata = iocb->ki_filp->private_data;
char *buf;
struct file *file = iocb->ki_filp;
struct ep_data *epdata = file->private_data;
size_t len = iov_iter_count(to);
ssize_t value;
char *buf;

if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
return -EINVAL;
if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
return value;

buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
if (unlikely(!buf))
/* halt any endpoint by doing a "wrong direction" i/o call */
if (usb_endpoint_dir_in(&epdata->desc)) {
if (usb_endpoint_xfer_isoc(&epdata->desc) ||
!is_sync_kiocb(iocb)) {
mutex_unlock(&epdata->lock);
return -EINVAL;
}
DBG (epdata->dev, "%s halt\n", epdata->name);
spin_lock_irq(&epdata->dev->lock);
if (likely(epdata->ep != NULL))
usb_ep_set_halt(epdata->ep);
spin_unlock_irq(&epdata->dev->lock);
mutex_unlock(&epdata->lock);
return -EBADMSG;
}

buf = kmalloc(len, GFP_KERNEL);
if (unlikely(!buf)) {
mutex_unlock(&epdata->lock);
return -ENOMEM;

return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
}
if (is_sync_kiocb(iocb)) {
value = ep_io(epdata, buf, len);
if (value >= 0 && copy_to_iter(buf, value, to))
value = -EFAULT;
} else {
struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
value = -ENOMEM;
if (!priv)
goto fail;
priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
if (!priv->to_free) {
kfree(priv);
goto fail;
}
value = ep_aio(iocb, priv, epdata, buf, len);
if (value == -EIOCBQUEUED)
buf = NULL;
}
fail:
kfree(buf);
mutex_unlock(&epdata->lock);
return value;
}

static ssize_t ep_config(struct ep_data *, const char *, size_t);

static ssize_t
ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t o)
ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct ep_data *epdata = iocb->ki_filp->private_data;
char *buf;
size_t len = 0;
int i = 0;
struct file *file = iocb->ki_filp;
struct ep_data *epdata = file->private_data;
size_t len = iov_iter_count(from);
bool configured;
ssize_t value;
char *buf;

if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
return -EINVAL;
if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
return value;

buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
if (unlikely(!buf))
return -ENOMEM;
configured = epdata->state == STATE_EP_ENABLED;

for (i=0; i < nr_segs; i++) {
if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
iov[i].iov_len) != 0)) {
kfree(buf);
return -EFAULT;
/* halt any endpoint by doing a "wrong direction" i/o call */
if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
if (usb_endpoint_xfer_isoc(&epdata->desc) ||
!is_sync_kiocb(iocb)) {
mutex_unlock(&epdata->lock);
return -EINVAL;
}
len += iov[i].iov_len;
DBG (epdata->dev, "%s halt\n", epdata->name);
spin_lock_irq(&epdata->dev->lock);
if (likely(epdata->ep != NULL))
usb_ep_set_halt(epdata->ep);
spin_unlock_irq(&epdata->dev->lock);
mutex_unlock(&epdata->lock);
return -EBADMSG;
}
return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);

buf = kmalloc(len, GFP_KERNEL);
if (unlikely(!buf)) {
mutex_unlock(&epdata->lock);
return -ENOMEM;
}

if (unlikely(copy_from_iter(buf, len, from) != len)) {
value = -EFAULT;
goto out;
}

if (unlikely(!configured)) {
value = ep_config(epdata, buf, len);
} else if (is_sync_kiocb(iocb)) {
value = ep_io(epdata, buf, len);
} else {
struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
value = -ENOMEM;
if (priv) {
value = ep_aio(iocb, priv, epdata, buf, len);
if (value == -EIOCBQUEUED)
buf = NULL;
}
}
out:
kfree(buf);
mutex_unlock(&epdata->lock);
return value;
}

/*----------------------------------------------------------------------*/
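Both ep_read_iter() and ep_write_iter() above rely on the same convention: copy_from_iter() and copy_to_iter() return the number of bytes actually moved, so a short copy shows up as a return value smaller than what was asked for (equivalently, iov_iter_count() is still non-zero afterwards). A minimal, hypothetical illustration of that check (the demo_* helpers are not part of the patch):

#include <linux/errno.h>
#include <linux/uio.h>

/* Hypothetical helpers, not part of the patch: the short-copy checks
 * used with copy_from_iter()/copy_to_iter() in the hunks above. */
static int demo_fill_from_user(void *buf, size_t len, struct iov_iter *from)
{
	/* anything less than 'len' means the user pages went away */
	if (copy_from_iter(buf, len, from) != len)
		return -EFAULT;
	return 0;
}

static int demo_push_to_user(void *buf, size_t len, struct iov_iter *to)
{
	size_t copied = copy_to_iter(buf, len, to);

	/* checking iov_iter_count(to) after asking for the whole buffer
	 * reports the same condition */
	return copied == len ? 0 : -EFAULT;
}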
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
/* used after endpoint configuration */
static const struct file_operations ep_io_operations = {
.owner = THIS_MODULE,
.llseek = no_llseek,

.read = ep_read,
.write = ep_write,
.unlocked_ioctl = ep_ioctl,
.open = ep_open,
.release = ep_release,

.aio_read = ep_aio_read,
.aio_write = ep_aio_write,
.llseek = no_llseek,
.read = new_sync_read,
.write = new_sync_write,
.unlocked_ioctl = ep_ioctl,
.read_iter = ep_read_iter,
.write_iter = ep_write_iter,
};

/* ENDPOINT INITIALIZATION

@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = {
* speed descriptor, then optional high speed descriptor.
*/
static ssize_t
ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
ep_config (struct ep_data *data, const char *buf, size_t len)
{
struct ep_data *data = fd->private_data;
struct usb_ep *ep;
u32 tag;
int value, length = len;

value = mutex_lock_interruptible(&data->lock);
if (value < 0)
return value;

if (data->state != STATE_EP_READY) {
value = -EL2HLT;
goto fail;

@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
goto fail0;

/* we might need to change message format someday */
if (copy_from_user (&tag, buf, 4)) {
goto fail1;
}
memcpy(&tag, buf, 4);
if (tag != 1) {
DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
goto fail0;

@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
*/

/* full/low speed descriptor, then high speed */
if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
goto fail1;
}
memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
goto fail0;
if (len != USB_DT_ENDPOINT_SIZE) {
if (len != 2 * USB_DT_ENDPOINT_SIZE)
goto fail0;
if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
USB_DT_ENDPOINT_SIZE)) {
goto fail1;
}
memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
USB_DT_ENDPOINT_SIZE);
if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
|| data->hs_desc.bDescriptorType
!= USB_DT_ENDPOINT) {

@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
case USB_SPEED_LOW:
case USB_SPEED_FULL:
ep->desc = &data->desc;
value = usb_ep_enable(ep);
if (value == 0)
data->state = STATE_EP_ENABLED;
break;
case USB_SPEED_HIGH:
/* fails if caller didn't provide that descriptor... */
ep->desc = &data->hs_desc;
value = usb_ep_enable(ep);
if (value == 0)
data->state = STATE_EP_ENABLED;
break;
default:
DBG(data->dev, "unconnected, %s init abandoned\n",
data->name);
value = -EINVAL;
goto gone;
}
value = usb_ep_enable(ep);
if (value == 0) {
fd->f_op = &ep_io_operations;
data->state = STATE_EP_ENABLED;
value = length;
}
gone:

@@ -867,14 +800,10 @@ fail:
data->desc.bDescriptorType = 0;
data->hs_desc.bDescriptorType = 0;
}
mutex_unlock(&data->lock);
return value;
fail0:
value = -EINVAL;
goto fail;
fail1:
value = -EFAULT;
goto fail;
}

static int

@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd)
return value;
}

/* used before endpoint configuration */
static const struct file_operations ep_config_operations = {
.llseek = no_llseek,

.open = ep_open,
.write = ep_config,
.release = ep_release,
};

/*----------------------------------------------------------------------*/

/* EP0 IMPLEMENTATION can be partly in userspace.

@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
enum ep0_state state;

spin_lock_irq (&dev->lock);
if (dev->state <= STATE_DEV_OPENED) {
retval = -EINVAL;
goto done;
}

/* report fd mode change before acting on it */
if (dev->setup_abort) {

@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
struct dev_data *dev = fd->private_data;
ssize_t retval = -ESRCH;

spin_lock_irq (&dev->lock);

/* report fd mode change before acting on it */
if (dev->setup_abort) {
dev->setup_abort = 0;

@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
} else
DBG (dev, "fail %s, state %d\n", __func__, dev->state);

spin_unlock_irq (&dev->lock);
return retval;
}


@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait)
struct dev_data *dev = fd->private_data;
int mask = 0;

if (dev->state <= STATE_DEV_OPENED)
return DEFAULT_POLLMASK;

poll_wait(fd, &dev->wait, wait);

spin_lock_irq (&dev->lock);

@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
return ret;
}

/* used after device configuration */
static const struct file_operations ep0_io_operations = {
.owner = THIS_MODULE,
.llseek = no_llseek,

.read = ep0_read,
.write = ep0_write,
.fasync = ep0_fasync,
.poll = ep0_poll,
.unlocked_ioctl = dev_ioctl,
.release = dev_release,
};

/*----------------------------------------------------------------------*/

/* The in-kernel gadget driver handles most ep0 issues, in particular

@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev)
goto enomem1;

data->dentry = gadgetfs_create_file (dev->sb, data->name,
data, &ep_config_operations);
data, &ep_io_operations);
if (!data->dentry)
goto enomem2;
list_add_tail (&data->epfiles, &dev->epfiles);

@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
u32 tag;
char *kbuf;

spin_lock_irq(&dev->lock);
if (dev->state > STATE_DEV_OPENED) {
value = ep0_write(fd, buf, len, ptr);
spin_unlock_irq(&dev->lock);
return value;
}
spin_unlock_irq(&dev->lock);

if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
return -EINVAL;


@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
* on, they can work ... except in cleanup paths that
* kick in after the ep0 descriptor is closed.
*/
fd->f_op = &ep0_io_operations;
value = len;
}
return value;

@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd)
return value;
}

static const struct file_operations dev_init_operations = {
static const struct file_operations ep0_operations = {
.llseek = no_llseek,

.open = dev_open,
.read = ep0_read,
.write = dev_config,
.fasync = ep0_fasync,
.poll = ep0_poll,
.unlocked_ioctl = dev_ioctl,
.release = dev_release,
};

@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
goto Enomem;

dev->sb = sb;
dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations);
dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
if (!dev->dentry) {
put_dev(dev);
goto Enomem;
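Taken together, the inode.c hunks stop gadgetfs from flipping file->f_op at runtime (ep_config_operations and dev_init_operations are gone); a single operations table now dispatches on the endpoint or device state, with ep_config() demoted to a helper called from ep_write_iter() while the endpoint is still in STATE_EP_READY. A stripped-down, hypothetical sketch of that state-keyed dispatch (all demo_* names are invented for illustration, not code from the patch):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Hypothetical sketch: one write_iter that handles both "not configured
 * yet" (descriptors written first) and normal data transfer, instead of
 * swapping file->f_op between two operation tables. */
enum demo_state { DEMO_READY, DEMO_ENABLED };

struct demo_ep {
	enum demo_state	state;
};

static ssize_t demo_apply_config(struct demo_ep *ep, const char *buf, size_t len)
{
	ep->state = DEMO_ENABLED;	/* parse descriptors, enable endpoint */
	return len;
}

static ssize_t demo_do_io(struct demo_ep *ep, const char *buf, size_t len)
{
	return len;			/* queue the transfer */
}

static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct demo_ep *ep = iocb->ki_filp->private_data;
	size_t len = iov_iter_count(from);
	char *buf;
	ssize_t value;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_iter(buf, len, from) != len) {
		kfree(buf);
		return -EFAULT;
	}
	/* same file, same f_op: behaviour keyed off the endpoint state */
	if (ep->state == DEMO_READY)
		value = demo_apply_config(ep, buf, len);
	else
		value = demo_do_io(ep, buf, len);
	kfree(buf);
	return value;
}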
include/linux/uio.h

@@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(struct iov_iter *i)
{
return i->count;
lib/Makefile

@@ -24,7 +24,7 @@ obj-y += lockref.o

obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
obj-y += string_helpers.o
lib/iov_iter.c

@@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
if (new->type & ITER_BVEC)
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
else
/* iovec and kvec have identical layout */
return new->iov = kmemdup(new->iov,
new->nr_segs * sizeof(struct iovec),
flags);
}
EXPORT_SYMBOL(dup_iter);
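dup_iter(), added above, copies the iov_iter by value and kmemdup()s the iovec (or bio_vec) array it references, returning the duplicate so the caller can kfree() it once the deferred completion has finished with the iterator. A small, hypothetical usage sketch (demo_ctx and the demo_* functions are not from the patch):

#include <linux/slab.h>
#include <linux/uio.h>

/* Hypothetical container: how a completion-deferred path keeps an
 * iov_iter alive with the new dup_iter() helper. */
struct demo_ctx {
	struct iov_iter	iter;		/* private copy, safe after return */
	const void	*to_free;	/* duplicated segment array */
};

static int demo_save_iter(struct demo_ctx *ctx, struct iov_iter *user_iter)
{
	/* dup_iter() copies *user_iter into ctx->iter and duplicates the
	 * segment array it points at, returning that duplicate so it can
	 * be freed once the deferred work has run. */
	ctx->to_free = dup_iter(&ctx->iter, user_iter, GFP_KERNEL);
	return ctx->to_free ? 0 : -ENOMEM;
}

static void demo_finish(struct demo_ctx *ctx)
{
	/* ...copy_to_iter(..., &ctx->iter) from the worker, then: */
	kfree(ctx->to_free);
}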
mm/Makefile

@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
iov_iter.o debug.o $(mmu-y)
debug.o $(mmu-y)

obj-y += init-mm.o