2010-05-05 18:53:14 +08:00
|
|
|
/*
|
2010-11-12 21:29:28 +08:00
|
|
|
* f_fs.c -- user mode file system API for USB composite function controllers
|
2010-05-05 18:53:14 +08:00
|
|
|
*
|
|
|
|
* Copyright (C) 2010 Samsung Electronics
|
2012-01-13 22:05:16 +08:00
|
|
|
* Author: Michal Nazarewicz <mina86@mina86.com>
|
2010-05-05 18:53:14 +08:00
|
|
|
*
|
2010-11-12 21:29:28 +08:00
|
|
|
* Based on inode.c (GadgetFS) which was:
|
2010-05-05 18:53:14 +08:00
|
|
|
* Copyright (C) 2003-2004 David Brownell
|
|
|
|
* Copyright (C) 2003 Agilent Technologies
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
/* #define DEBUG */
|
|
|
|
/* #define VERBOSE_DEBUG */
|
|
|
|
|
|
|
|
#include <linux/blkdev.h>
|
2010-05-11 01:51:36 +08:00
|
|
|
#include <linux/pagemap.h>
|
2011-05-27 21:56:31 +08:00
|
|
|
#include <linux/export.h>
|
2012-05-31 02:43:37 +08:00
|
|
|
#include <linux/hid.h>
|
2013-12-03 22:15:33 +08:00
|
|
|
#include <linux/module.h>
|
2015-02-23 00:58:50 +08:00
|
|
|
#include <linux/uio.h>
|
2010-05-05 18:53:14 +08:00
|
|
|
#include <asm/unaligned.h>
|
|
|
|
|
|
|
|
#include <linux/usb/composite.h>
|
|
|
|
#include <linux/usb/functionfs.h>
|
|
|
|
|
2014-02-10 17:42:44 +08:00
|
|
|
#include <linux/aio.h>
|
|
|
|
#include <linux/mmu_context.h>
|
2014-02-10 17:42:43 +08:00
|
|
|
#include <linux/poll.h>
|
2015-01-23 20:41:01 +08:00
|
|
|
#include <linux/eventfd.h>
|
2014-02-10 17:42:43 +08:00
|
|
|
|
2013-12-03 22:15:31 +08:00
|
|
|
#include "u_fs.h"
|
2014-05-08 20:06:21 +08:00
|
|
|
#include "u_f.h"
|
2014-07-09 18:20:08 +08:00
|
|
|
#include "u_os_desc.h"
|
2013-12-03 22:15:36 +08:00
|
|
|
#include "configfs.h"
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
|
|
|
|
|
|
|
|
/* Reference counter handling */
|
|
|
|
static void ffs_data_get(struct ffs_data *ffs);
|
|
|
|
static void ffs_data_put(struct ffs_data *ffs);
|
|
|
|
/* Creates new ffs_data object. */
|
|
|
|
static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
|
|
|
|
|
|
|
|
/* Opened counter handling. */
|
|
|
|
static void ffs_data_opened(struct ffs_data *ffs);
|
|
|
|
static void ffs_data_closed(struct ffs_data *ffs);
|
|
|
|
|
2010-11-12 21:29:28 +08:00
|
|
|
/* Called with ffs->mutex held; take over ownership of data. */
|
2010-05-05 18:53:14 +08:00
|
|
|
static int __must_check
|
|
|
|
__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
|
|
|
|
static int __must_check
|
|
|
|
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
|
|
|
|
|
|
|
|
|
|
|
|
/* The function structure ***************************************************/
|
|
|
|
|
|
|
|
struct ffs_ep;
|
|
|
|
|
|
|
|
/* Per-binding state of a FunctionFS USB function. */
struct ffs_function {
	struct usb_configuration	*conf;	/* configuration we are part of */
	struct usb_gadget		*gadget;	/* gadget we are bound to */
	struct ffs_data			*ffs;	/* backing FunctionFS instance */

	struct ffs_ep			*eps;	/* endpoint array */
	u8				eps_revmap[16];	/* ep address -> eps index (see ffs_func_revmap_ep) */
	short				*interfaces_nums;	/* local -> assigned interface numbers */

	struct usb_function		function;	/* embedded composite-framework function */
};
|
|
|
|
|
|
|
|
|
|
|
|
/* Map a composite-framework usb_function back to its enclosing ffs_function. */
static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
{
	return container_of(f, struct ffs_function, function);
}
|
|
|
|
|
|
|
|
|
2014-02-10 17:42:41 +08:00
|
|
|
/*
 * Atomically turn FFS_SETUP_CANCELLED into FFS_NO_SETUP and return the
 * state that was actually observed.  Callers compare the return value
 * against FFS_SETUP_CANCELLED to detect a setup request that was cancelled
 * while they were not holding the event lock.
 */
static inline enum ffs_setup_state
ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
{
	return (enum ffs_setup_state)
		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
}
|
|
|
|
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
static void ffs_func_eps_disable(struct ffs_function *func);
|
|
|
|
static int __must_check ffs_func_eps_enable(struct ffs_function *func);
|
|
|
|
|
|
|
|
static int ffs_func_bind(struct usb_configuration *,
|
|
|
|
struct usb_function *);
|
|
|
|
static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
|
|
|
|
static void ffs_func_disable(struct usb_function *);
|
|
|
|
static int ffs_func_setup(struct usb_function *,
|
|
|
|
const struct usb_ctrlrequest *);
|
|
|
|
static void ffs_func_suspend(struct usb_function *);
|
|
|
|
static void ffs_func_resume(struct usb_function *);
|
|
|
|
|
|
|
|
|
|
|
|
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
|
|
|
|
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
|
|
|
|
|
|
|
|
|
|
|
|
/* The endpoints structures *************************************************/
|
|
|
|
|
|
|
|
/* Kernel-side state of a single (non-ep0) endpoint. */
struct ffs_ep {
	struct usb_ep			*ep;	/* P: ffs->eps_lock */
	struct usb_request		*req;	/* P: epfile->mutex */

	/* [0]: full speed, [1]: high speed, [2]: super speed */
	struct usb_endpoint_descriptor	*descs[3];

	u8				num;	/* endpoint number within this function */

	int				status;	/* P: epfile->mutex */
};
|
|
|
|
|
|
|
|
/* One epN file in the functionfs mount, backing a single endpoint. */
struct ffs_epfile {
	/* Protects ep->ep and ep->req. */
	struct mutex			mutex;
	wait_queue_head_t		wait;	/* woken when endpoint becomes available */

	struct ffs_data			*ffs;	/* owning FunctionFS instance */
	struct ffs_ep			*ep;	/* P: ffs->eps_lock */

	struct dentry			*dentry;	/* our node in the functionfs mount */

	char				name[5];	/* file name, "ep%u" */

	unsigned char			in;	/* P: ffs->eps_lock */
	unsigned char			isoc;	/* P: ffs->eps_lock */

	unsigned char			_pad;	/* explicit padding, unused */
};
|
|
|
|
|
2014-02-10 17:42:44 +08:00
|
|
|
/* ffs_io_data structure ***************************************************/
|
|
|
|
|
|
|
|
/* Bookkeeping for one epfile I/O operation (sync or AIO). */
struct ffs_io_data {
	bool aio;	/* asynchronous (AIO) request? */
	bool read;	/* data flows towards user space? */

	struct kiocb *kiocb;	/* submitting AIO control block */
	struct iov_iter data;	/* user buffer description */
	const void *to_free;	/* freed after a read completes (see ffs_user_copy_worker) */
	char *buf;	/* kernel bounce buffer */

	struct mm_struct *mm;	/* submitter's mm, used for copy-out in the worker */
	struct work_struct work;	/* deferred completion, runs in process context */

	struct usb_ep *ep;
	struct usb_request *req;

	struct ffs_data *ffs;	/* for ffs_eventfd signalling on completion */
};
|
|
|
|
|
2014-08-25 17:16:27 +08:00
|
|
|
/*
 * Scratch state carried through descriptor parsing.
 * NOTE(review): counts presumably track interfaces/endpoints seen while
 * walking user-supplied descriptors — confirm against the parsing helpers
 * (not visible in this chunk).
 */
struct ffs_desc_helper {
	struct ffs_data *ffs;
	unsigned interfaces_count;
	unsigned eps_count;
};
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
|
|
|
|
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
|
|
|
|
|
2014-09-04 01:32:19 +08:00
|
|
|
static struct dentry *
|
2010-05-05 18:53:14 +08:00
|
|
|
ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
|
2014-09-04 01:32:19 +08:00
|
|
|
const struct file_operations *fops);
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2013-12-03 22:15:32 +08:00
|
|
|
/* Devices management *******************************************************/
|
|
|
|
|
|
|
|
DEFINE_MUTEX(ffs_lock);
|
2014-04-02 02:19:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(ffs_lock);
|
2013-12-03 22:15:32 +08:00
|
|
|
|
2014-01-13 23:49:38 +08:00
|
|
|
static struct ffs_dev *_ffs_find_dev(const char *name);
|
|
|
|
static struct ffs_dev *_ffs_alloc_dev(void);
|
2013-12-03 22:15:36 +08:00
|
|
|
static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
|
2014-01-13 23:49:38 +08:00
|
|
|
static void _ffs_free_dev(struct ffs_dev *dev);
|
2013-12-03 22:15:32 +08:00
|
|
|
static void *ffs_acquire_dev(const char *dev_name);
|
|
|
|
static void ffs_release_dev(struct ffs_data *ffs_data);
|
|
|
|
static int ffs_ready(struct ffs_data *ffs);
|
|
|
|
static void ffs_closed(struct ffs_data *ffs);
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
/* Misc helper functions ****************************************************/
|
|
|
|
|
|
|
|
static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
|
|
|
|
__attribute__((warn_unused_result, nonnull));
|
2012-09-27 09:43:45 +08:00
|
|
|
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
|
2010-05-05 18:53:14 +08:00
|
|
|
__attribute__((warn_unused_result, nonnull));
|
|
|
|
|
|
|
|
|
|
|
|
/* Control file aka ep0 *****************************************************/
|
|
|
|
|
|
|
|
/* ep0 request completion: wake everybody blocked in __ffs_ep0_queue_wait(). */
static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ffs_data *ffs = req->context;

	complete_all(&ffs->ep0req_completion);
}
|
|
|
|
|
|
|
|
/*
 * Queue @len bytes at @data on ep0 and wait for the transfer to finish.
 *
 * Called with ffs->ev.waitq.lock held; the lock is dropped here on behalf
 * of the caller before the request is queued.
 *
 * Returns bytes transferred on success, the request's negative status on
 * failure, or -EINTR if the wait was interrupted (the request is dequeued
 * in that case).
 */
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
{
	struct usb_request *req = ffs->ep0req;
	int ret;

	/* Host asked for more than we send: terminate with a short/zero packet. */
	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);

	spin_unlock_irq(&ffs->ev.waitq.lock);

	req->buf      = data;
	req->length   = len;

	/*
	 * UDC layer requires to provide a buffer even for ZLP, but should
	 * not use it at all. Let's provide some poisoned pointer to catch
	 * possible bug in the driver.
	 */
	if (req->buf == NULL)
		req->buf = (void *)0xDEADBABE;

	reinit_completion(&ffs->ep0req_completion);

	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
	if (unlikely(ret)) {
		/* Interrupted: take the request back before reporting -EINTR. */
		usb_ep_dequeue(ffs->gadget->ep0, req);
		return -EINTR;
	}

	ffs->setup_state = FFS_NO_SETUP;
	return req->status ? req->status : req->actual;
}
|
|
|
|
|
|
|
|
static int __ffs_ep0_stall(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
if (ffs->ev.can_stall) {
|
2010-11-18 00:09:47 +08:00
|
|
|
pr_vdebug("ep0 stall\n");
|
2010-05-05 18:53:14 +08:00
|
|
|
usb_ep_set_halt(ffs->gadget->ep0);
|
|
|
|
ffs->setup_state = FFS_NO_SETUP;
|
|
|
|
return -EL2HLT;
|
|
|
|
} else {
|
2010-11-18 00:09:47 +08:00
|
|
|
pr_debug("bogus ep0 stall!\n");
|
2010-05-05 18:53:14 +08:00
|
|
|
return -ESRCH;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * write() handler for the ep0 file.
 *
 * Depending on ffs->state the written data is either the descriptor blob,
 * the strings blob (FFS_READ_DESCRIPTORS / FFS_READ_STRINGS), or the data
 * stage of a pending IN setup request (FFS_ACTIVE).  Any other state
 * yields -EBADFD.
 */
static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
			     size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	ssize_t ret;
	char *data;

	ENTER();

	/* Fast check if setup was canceled */
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:
		/* Copy data; anything shorter than a header cannot be valid. */
		if (unlikely(len < 16)) {
			ret = -EINVAL;
			break;
		}

		data = ffs_prepare_buffer(buf, len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		/* Handle data */
		if (ffs->state == FFS_READ_DESCRIPTORS) {
			pr_info("read descriptors\n");
			ret = __ffs_data_got_descs(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ffs->state = FFS_READ_STRINGS;
			ret = len;
		} else {
			pr_info("read strings\n");
			ret = __ffs_data_got_strings(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ret = ffs_epfiles_create(ffs);
			if (unlikely(ret)) {
				ffs->state = FFS_CLOSING;
				break;
			}

			ffs->state = FFS_ACTIVE;
			/* ffs_ready() must not be called under our mutex. */
			mutex_unlock(&ffs->mutex);

			ret = ffs_ready(ffs);
			if (unlikely(ret < 0)) {
				ffs->state = FFS_CLOSING;
				return ret;
			}

			return len;
		}
		break;

	case FFS_ACTIVE:
		data = NULL;
		/*
		 * We're called from user space, we can use _irq
		 * rather then _irqsave
		 */
		spin_lock_irq(&ffs->ev.waitq.lock);
		switch (ffs_setup_state_clear_cancelled(ffs)) {
		case FFS_SETUP_CANCELLED:
			ret = -EIDRM;
			goto done_spin;

		case FFS_NO_SETUP:
			ret = -ESRCH;
			goto done_spin;

		case FFS_SETUP_PENDING:
			break;
		}

		/* FFS_SETUP_PENDING: writing only makes sense for IN requests. */
		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			break;
		}

		/* FFS_SETUP_PENDING and not stall */
		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		data = ffs_prepare_buffer(buf, len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/*
		 * We are guaranteed to be still in FFS_ACTIVE state
		 * but the state of setup could have changed from
		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
		 * to check for that. If that happened we copied data
		 * from user space in vain but it's unlikely.
		 *
		 * For sure we are not in FFS_NO_SETUP since this is
		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
		 * transition can be performed and it's protected by
		 * mutex.
		 */
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {
			ret = -EIDRM;
done_spin:
			spin_unlock_irq(&ffs->ev.waitq.lock);
		} else {
			/* unlocks spinlock */
			ret = __ffs_ep0_queue_wait(ffs, data, len);
		}
		kfree(data);
		break;

	default:
		ret = -EBADFD;
		break;
	}

	mutex_unlock(&ffs->mutex);
	return ret;
}
|
|
|
|
|
2014-09-10 23:50:24 +08:00
|
|
|
/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
				     size_t n)
{
	/*
	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
	 * size of ffs->ev.types array (which is four) so that's how much space
	 * we reserve.
	 */
	struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
	const size_t size = n * sizeof *events;
	unsigned i = 0;

	/* Zero everything so no uninitialized stack bytes reach user space. */
	memset(events, 0, size);

	do {
		events[i].type = ffs->ev.types[i];
		if (events[i].type == FUNCTIONFS_SETUP) {
			/* Delivering a setup event arms the data/status stage. */
			events[i].u.setup = ffs->ev.setup;
			ffs->setup_state = FFS_SETUP_PENDING;
		}
	} while (++i < n);

	/* Consume the n delivered events; shift any remainder to the front. */
	ffs->ev.count -= n;
	if (ffs->ev.count)
		memmove(ffs->ev.types, ffs->ev.types + n,
			ffs->ev.count * sizeof *ffs->ev.types);

	spin_unlock_irq(&ffs->ev.waitq.lock);
	mutex_unlock(&ffs->mutex);

	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
}
|
|
|
|
|
|
|
|
/*
 * read() handler for the ep0 file.
 *
 * With no setup pending this blocks (unless O_NONBLOCK) until events are
 * queued and then delivers them; with an OUT setup pending it reads the
 * request's data stage.  Reading an IN setup's data stage stalls ep0.
 */
static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
			    size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	char *data = NULL;
	size_t n;
	int ret;

	ENTER();

	/* Fast check if setup was canceled */
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	if (ffs->state != FFS_ACTIVE) {
		ret = -EBADFD;
		goto done_mutex;
	}

	/*
	 * We're called from user space, we can use _irq rather then
	 * _irqsave
	 */
	spin_lock_irq(&ffs->ev.waitq.lock);

	switch (ffs_setup_state_clear_cancelled(ffs)) {
	case FFS_SETUP_CANCELLED:
		ret = -EIDRM;
		break;

	case FFS_NO_SETUP:
		n = len / sizeof(struct usb_functionfs_event);
		if (unlikely(!n)) {
			/* Buffer cannot hold even a single event. */
			ret = -EINVAL;
			break;
		}

		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
			ret = -EAGAIN;
			break;
		}

		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
							ffs->ev.count)) {
			ret = -EINTR;
			break;
		}

		/* Releases both the spinlock and the mutex. */
		return __ffs_ep0_read_events(ffs, buf,
					     min(n, (size_t)ffs->ev.count));

	case FFS_SETUP_PENDING:
		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
			/* Reading the data stage of an IN setup is bogus. */
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			goto done_mutex;
		}

		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		if (likely(len)) {
			data = kmalloc(len, GFP_KERNEL);
			if (unlikely(!data)) {
				ret = -ENOMEM;
				goto done_mutex;
			}
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/* See ffs_ep0_write() */
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {
			ret = -EIDRM;
			break;
		}

		/* unlocks spinlock */
		ret = __ffs_ep0_queue_wait(ffs, data, len);
		if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
			ret = -EFAULT;
		goto done_mutex;

	default:
		ret = -EBADFD;
		break;
	}

	spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
	mutex_unlock(&ffs->mutex);
	kfree(data);
	return ret;
}
|
|
|
|
|
|
|
|
static int ffs_ep0_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct ffs_data *ffs = inode->i_private;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
if (unlikely(ffs->state == FFS_CLOSING))
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
file->private_data = ffs;
|
|
|
|
ffs_data_opened(ffs);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ffs_ep0_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct ffs_data *ffs = file->private_data;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
ffs_data_closed(ffs);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
|
|
|
|
{
|
|
|
|
struct ffs_data *ffs = file->private_data;
|
|
|
|
struct usb_gadget *gadget = ffs->gadget;
|
|
|
|
long ret;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
if (code == FUNCTIONFS_INTERFACE_REVMAP) {
|
|
|
|
struct ffs_function *func = ffs->func;
|
|
|
|
ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
|
2012-03-28 15:30:50 +08:00
|
|
|
} else if (gadget && gadget->ops->ioctl) {
|
2010-05-05 18:53:14 +08:00
|
|
|
ret = gadget->ops->ioctl(gadget, code, value);
|
|
|
|
} else {
|
|
|
|
ret = -ENOTTY;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-02-10 17:42:43 +08:00
|
|
|
/*
 * poll() for the ep0 file.  Always writable; additionally readable when
 * events are queued or a setup request is pending/cancelled.
 */
static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
{
	struct ffs_data *ffs = file->private_data;
	unsigned int mask = POLLWRNORM;
	int ret;

	poll_wait(file, &ffs->ev.waitq, wait);

	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return mask;

	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:
		mask |= POLLOUT;
		break;

	case FFS_ACTIVE:
		switch (ffs->setup_state) {
		case FFS_NO_SETUP:
			if (ffs->ev.count)
				mask |= POLLIN;
			break;

		case FFS_SETUP_PENDING:
		case FFS_SETUP_CANCELLED:
			mask |= (POLLIN | POLLOUT);
			break;
		}
		/*
		 * Fix: the original fell through into the FFS_CLOSING case
		 * here (harmless only because that case is empty).
		 */
		break;

	case FFS_CLOSING:
		break;
	case FFS_DEACTIVATED:
		break;
	}

	mutex_unlock(&ffs->mutex);

	return mask;
}
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
/* File operations for the ep0 control file. */
static const struct file_operations ffs_ep0_operations = {
	.llseek =	no_llseek,	/* ep0 is a stream; seeking is meaningless */

	.open =		ffs_ep0_open,
	.write =	ffs_ep0_write,
	.read =		ffs_ep0_read,
	.release =	ffs_ep0_release,
	.unlocked_ioctl =	ffs_ep0_ioctl,
	.poll =		ffs_ep0_poll,
};
|
|
|
|
|
|
|
|
|
|
|
|
/* "Normal" endpoints operations ********************************************/
|
|
|
|
|
|
|
|
/*
 * Completion callback for synchronous epfile I/O: stash the result in the
 * endpoint and wake the waiter.  A NULL context means nobody is waiting.
 */
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
	ENTER();
	if (likely(req->context)) {
		struct ffs_ep *ep = _ep->driver_data;
		ep->status = req->status ? req->status : req->actual;
		complete(req->context);
	}
}
|
|
|
|
|
2014-02-10 17:42:44 +08:00
|
|
|
/*
 * Process-context tail of an AIO transfer: copy read data back to the
 * submitter's address space, complete the kiocb, signal the optional
 * eventfd and free all per-I/O resources.
 */
static void ffs_user_copy_worker(struct work_struct *work)
{
	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
						   work);
	int ret = io_data->req->status ? io_data->req->status :
					 io_data->req->actual;
	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;

	if (io_data->read && ret > 0) {
		/* Borrow the submitter's mm so copy_to_iter() can reach its buffer. */
		use_mm(io_data->mm);
		ret = copy_to_iter(io_data->buf, ret, &io_data->data);
		/* Short copy with room left in the iter means a bad user address. */
		if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
			ret = -EFAULT;
		unuse_mm(io_data->mm);
	}

	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);

	/* Avoid double notification when the kiocb already carries an eventfd. */
	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
		eventfd_signal(io_data->ffs->ffs_eventfd, 1);

	usb_ep_free_request(io_data->ep, io_data->req);

	if (io_data->read)
		kfree(io_data->to_free);
	kfree(io_data->buf);
	kfree(io_data);
}
|
|
|
|
|
|
|
|
/*
 * AIO completion callback (may run in interrupt context): defer the
 * user-space copy-out and bookkeeping to ffs_user_copy_worker().
 */
static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
					 struct usb_request *req)
{
	struct ffs_io_data *io_data = req->context;

	ENTER();

	INIT_WORK(&io_data->work, ffs_user_copy_worker);
	schedule_work(&io_data->work);
}
|
|
|
|
|
|
|
|
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
|
2010-05-05 18:53:14 +08:00
|
|
|
{
|
|
|
|
struct ffs_epfile *epfile = file->private_data;
|
2016-01-05 04:05:59 +08:00
|
|
|
struct usb_request *req;
|
2010-05-05 18:53:14 +08:00
|
|
|
struct ffs_ep *ep;
|
|
|
|
char *data = NULL;
|
2014-10-14 02:15:54 +08:00
|
|
|
ssize_t ret, data_len = -EINVAL;
|
2010-05-05 18:53:14 +08:00
|
|
|
int halt;
|
|
|
|
|
2013-12-10 07:55:36 +08:00
|
|
|
/* Are we still active? */
|
2016-01-05 03:58:12 +08:00
|
|
|
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
|
|
|
|
return -ENODEV;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2013-12-10 07:55:36 +08:00
|
|
|
/* Wait for endpoint to be enabled */
|
|
|
|
ep = epfile->ep;
|
|
|
|
if (!ep) {
|
2016-01-05 03:58:12 +08:00
|
|
|
if (file->f_flags & O_NONBLOCK)
|
|
|
|
return -EAGAIN;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2013-12-10 07:55:36 +08:00
|
|
|
ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
|
2016-01-05 03:58:12 +08:00
|
|
|
if (ret)
|
|
|
|
return -EINTR;
|
2013-12-10 07:55:36 +08:00
|
|
|
}
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2013-12-10 07:55:36 +08:00
|
|
|
/* Do we halt? */
|
2014-02-10 17:42:44 +08:00
|
|
|
halt = (!io_data->read == !epfile->in);
|
2016-01-05 03:58:12 +08:00
|
|
|
if (halt && epfile->isoc)
|
|
|
|
return -EINVAL;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2013-12-10 07:55:36 +08:00
|
|
|
/* Allocate & copy */
|
|
|
|
if (!halt) {
|
2014-01-20 15:33:50 +08:00
|
|
|
/*
|
|
|
|
* if we _do_ wait above, the epfile->ffs->gadget might be NULL
|
2016-01-05 04:05:59 +08:00
|
|
|
* before the waiting completes, so do not assign to 'gadget'
|
|
|
|
* earlier
|
2014-01-20 15:33:50 +08:00
|
|
|
*/
|
|
|
|
struct usb_gadget *gadget = epfile->ffs->gadget;
|
2015-02-01 12:23:35 +08:00
|
|
|
size_t copied;
|
2014-01-20 15:33:50 +08:00
|
|
|
|
2014-04-14 11:19:53 +08:00
|
|
|
spin_lock_irq(&epfile->ffs->eps_lock);
|
|
|
|
/* In the meantime, endpoint got disabled or changed. */
|
|
|
|
if (epfile->ep != ep) {
|
|
|
|
spin_unlock_irq(&epfile->ffs->eps_lock);
|
|
|
|
return -ESHUTDOWN;
|
|
|
|
}
|
2015-02-01 12:23:35 +08:00
|
|
|
data_len = iov_iter_count(&io_data->data);
|
2013-12-10 07:55:37 +08:00
|
|
|
/*
|
|
|
|
* Controller may require buffer size to be aligned to
|
|
|
|
* maxpacketsize of an out endpoint.
|
|
|
|
*/
|
2015-02-01 12:23:35 +08:00
|
|
|
if (io_data->read)
|
|
|
|
data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
|
2014-04-14 11:19:53 +08:00
|
|
|
spin_unlock_irq(&epfile->ffs->eps_lock);
|
2013-12-10 07:55:37 +08:00
|
|
|
|
|
|
|
data = kmalloc(data_len, GFP_KERNEL);
|
2013-12-10 07:55:36 +08:00
|
|
|
if (unlikely(!data))
|
|
|
|
return -ENOMEM;
|
2015-02-01 12:23:35 +08:00
|
|
|
if (!io_data->read) {
|
|
|
|
copied = copy_from_iter(data, data_len, &io_data->data);
|
|
|
|
if (copied != data_len) {
|
2014-02-10 17:42:44 +08:00
|
|
|
ret = -EFAULT;
|
|
|
|
goto error;
|
|
|
|
}
|
2013-12-10 07:55:36 +08:00
|
|
|
}
|
|
|
|
}
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2013-12-10 07:55:36 +08:00
|
|
|
/* We will be using request */
|
|
|
|
ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
|
|
|
|
if (unlikely(ret))
|
|
|
|
goto error;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2013-12-10 07:55:36 +08:00
|
|
|
spin_lock_irq(&epfile->ffs->eps_lock);
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2013-12-10 07:55:36 +08:00
|
|
|
if (epfile->ep != ep) {
|
|
|
|
/* In the meantime, endpoint got disabled or changed. */
|
|
|
|
ret = -ESHUTDOWN;
|
|
|
|
} else if (halt) {
|
|
|
|
/* Halt */
|
2010-05-05 18:53:14 +08:00
|
|
|
if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
|
|
|
|
usb_ep_set_halt(ep->ep);
|
|
|
|
ret = -EBADMSG;
|
2016-01-05 04:05:59 +08:00
|
|
|
} else if (unlikely(data_len == -EINVAL)) {
|
2014-10-14 02:15:54 +08:00
|
|
|
/*
|
|
|
|
* Sanity Check: even though data_len can't be used
|
|
|
|
* uninitialized at the time I write this comment, some
|
|
|
|
* compilers complain about this situation.
|
|
|
|
* In order to keep the code clean from warnings, data_len is
|
|
|
|
* being initialized to -EINVAL during its declaration, which
|
|
|
|
* means we can't rely on compiler anymore to warn no future
|
|
|
|
* changes won't result in data_len being used uninitialized.
|
|
|
|
* For such reason, we're adding this redundant sanity check
|
|
|
|
* here.
|
|
|
|
*/
|
2016-01-05 04:05:59 +08:00
|
|
|
WARN(1, "%s: data_len == -EINVAL\n", __func__);
|
|
|
|
ret = -EINVAL;
|
|
|
|
} else if (!io_data->aio) {
|
|
|
|
DECLARE_COMPLETION_ONSTACK(done);
|
usb: f_fs: avoid race condition with ffs_epfile_io_complete
ffs_epfile_io and ffs_epfile_io_complete runs in different context, but
there is no synchronization between them.
consider the following scenario:
1) ffs_epfile_io interrupted by sigal while
wait_for_completion_interruptible
2) then ffs_epfile_io set ret to -EINTR
3) just before or during usb_ep_dequeue, the request completed
4) ffs_epfile_io return with -EINTR
In this case, ffs_epfile_io tell caller no transfer success but actually
it may has been done. This break the caller's pipe.
Below script can help test it (adbd is the process which lies on f_fs).
while true
do
pkill -19 adbd #SIGSTOP
pkill -18 adbd #SIGCONT
sleep 0.1
done
To avoid this, just dequeue the request first. After usb_ep_dequeue, the
request must be done or canceled.
With this change, we can ensure no race condition in f_fs driver. But
actually I found some of the udc driver has analogical issue in its
dequeue implementation. For example,
1) the dequeue function hold the controller's lock.
2) before driver request controller to stop transfer, a request
completed.
3) the controller trigger a interrupt, but its irq handler need wait
dequeue function to release the lock.
4) dequeue function give back the request with negative status, and
release lock.
5) irq handler get lock but the request has already been given back.
So, the dequeue implementation should take care of this case. IMO, it
can be done as below steps to dequeue a already started request,
1) request controller to stop transfer on the given ep. HW know the
actual transfer status.
2) after hw stop transfer, driver scan if there are any completed one.
3) if found, process it with real status. if no, the request can
canceled.
Signed-off-by: "Du, Changbin" <changbin.du@intel.com>
[mina86@mina86.com: rebased on top of refactoring commits]
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Felipe Balbi <balbi@kernel.org>
2015-12-29 14:36:58 +08:00
|
|
|
bool interrupted = false;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
req = ep->req;
|
|
|
|
req->buf = data;
|
|
|
|
req->length = data_len;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
req->context = &done;
|
|
|
|
req->complete = ffs_epfile_io_complete;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
|
|
|
|
if (unlikely(ret < 0))
|
|
|
|
goto error_lock;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
spin_unlock_irq(&epfile->ffs->eps_lock);
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
if (unlikely(wait_for_completion_interruptible(&done))) {
|
usb: f_fs: avoid race condition with ffs_epfile_io_complete
ffs_epfile_io and ffs_epfile_io_complete runs in different context, but
there is no synchronization between them.
consider the following scenario:
1) ffs_epfile_io interrupted by sigal while
wait_for_completion_interruptible
2) then ffs_epfile_io set ret to -EINTR
3) just before or during usb_ep_dequeue, the request completed
4) ffs_epfile_io return with -EINTR
In this case, ffs_epfile_io tell caller no transfer success but actually
it may has been done. This break the caller's pipe.
Below script can help test it (adbd is the process which lies on f_fs).
while true
do
pkill -19 adbd #SIGSTOP
pkill -18 adbd #SIGCONT
sleep 0.1
done
To avoid this, just dequeue the request first. After usb_ep_dequeue, the
request must be done or canceled.
With this change, we can ensure no race condition in f_fs driver. But
actually I found some of the udc driver has analogical issue in its
dequeue implementation. For example,
1) the dequeue function hold the controller's lock.
2) before driver request controller to stop transfer, a request
completed.
3) the controller trigger a interrupt, but its irq handler need wait
dequeue function to release the lock.
4) dequeue function give back the request with negative status, and
release lock.
5) irq handler get lock but the request has already been given back.
So, the dequeue implementation should take care of this case. IMO, it
can be done as below steps to dequeue a already started request,
1) request controller to stop transfer on the given ep. HW know the
actual transfer status.
2) after hw stop transfer, driver scan if there are any completed one.
3) if found, process it with real status. if no, the request can
canceled.
Signed-off-by: "Du, Changbin" <changbin.du@intel.com>
[mina86@mina86.com: rebased on top of refactoring commits]
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Felipe Balbi <balbi@kernel.org>
2015-12-29 14:36:58 +08:00
|
|
|
/*
|
|
|
|
* To avoid race condition with ffs_epfile_io_complete,
|
|
|
|
* dequeue the request first then check
|
|
|
|
* status. usb_ep_dequeue API should guarantee no race
|
|
|
|
* condition with req->complete callback.
|
|
|
|
*/
|
2016-01-05 04:05:59 +08:00
|
|
|
usb_ep_dequeue(ep->ep, req);
|
usb: f_fs: avoid race condition with ffs_epfile_io_complete
ffs_epfile_io and ffs_epfile_io_complete runs in different context, but
there is no synchronization between them.
consider the following scenario:
1) ffs_epfile_io interrupted by sigal while
wait_for_completion_interruptible
2) then ffs_epfile_io set ret to -EINTR
3) just before or during usb_ep_dequeue, the request completed
4) ffs_epfile_io return with -EINTR
In this case, ffs_epfile_io tell caller no transfer success but actually
it may has been done. This break the caller's pipe.
Below script can help test it (adbd is the process which lies on f_fs).
while true
do
pkill -19 adbd #SIGSTOP
pkill -18 adbd #SIGCONT
sleep 0.1
done
To avoid this, just dequeue the request first. After usb_ep_dequeue, the
request must be done or canceled.
With this change, we can ensure no race condition in f_fs driver. But
actually I found some of the udc driver has analogical issue in its
dequeue implementation. For example,
1) the dequeue function hold the controller's lock.
2) before driver request controller to stop transfer, a request
completed.
3) the controller trigger a interrupt, but its irq handler need wait
dequeue function to release the lock.
4) dequeue function give back the request with negative status, and
release lock.
5) irq handler get lock but the request has already been given back.
So, the dequeue implementation should take care of this case. IMO, it
can be done as below steps to dequeue a already started request,
1) request controller to stop transfer on the given ep. HW know the
actual transfer status.
2) after hw stop transfer, driver scan if there are any completed one.
3) if found, process it with real status. if no, the request can
canceled.
Signed-off-by: "Du, Changbin" <changbin.du@intel.com>
[mina86@mina86.com: rebased on top of refactoring commits]
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Felipe Balbi <balbi@kernel.org>
2015-12-29 14:36:58 +08:00
|
|
|
interrupted = ep->status < 0;
|
2016-01-05 04:05:59 +08:00
|
|
|
}
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
/*
|
|
|
|
* XXX We may end up silently droping data here. Since data_len
|
|
|
|
* (i.e. req->length) may be bigger than len (after being
|
|
|
|
* rounded up to maxpacketsize), we may end up with more data
|
|
|
|
* then user space has space for.
|
|
|
|
*/
|
usb: f_fs: avoid race condition with ffs_epfile_io_complete
ffs_epfile_io and ffs_epfile_io_complete runs in different context, but
there is no synchronization between them.
consider the following scenario:
1) ffs_epfile_io interrupted by sigal while
wait_for_completion_interruptible
2) then ffs_epfile_io set ret to -EINTR
3) just before or during usb_ep_dequeue, the request completed
4) ffs_epfile_io return with -EINTR
In this case, ffs_epfile_io tell caller no transfer success but actually
it may has been done. This break the caller's pipe.
Below script can help test it (adbd is the process which lies on f_fs).
while true
do
pkill -19 adbd #SIGSTOP
pkill -18 adbd #SIGCONT
sleep 0.1
done
To avoid this, just dequeue the request first. After usb_ep_dequeue, the
request must be done or canceled.
With this change, we can ensure no race condition in f_fs driver. But
actually I found some of the udc driver has analogical issue in its
dequeue implementation. For example,
1) the dequeue function hold the controller's lock.
2) before driver request controller to stop transfer, a request
completed.
3) the controller trigger a interrupt, but its irq handler need wait
dequeue function to release the lock.
4) dequeue function give back the request with negative status, and
release lock.
5) irq handler get lock but the request has already been given back.
So, the dequeue implementation should take care of this case. IMO, it
can be done as below steps to dequeue a already started request,
1) request controller to stop transfer on the given ep. HW know the
actual transfer status.
2) after hw stop transfer, driver scan if there are any completed one.
3) if found, process it with real status. if no, the request can
canceled.
Signed-off-by: "Du, Changbin" <changbin.du@intel.com>
[mina86@mina86.com: rebased on top of refactoring commits]
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Felipe Balbi <balbi@kernel.org>
2015-12-29 14:36:58 +08:00
|
|
|
ret = interrupted ? -EINTR : ep->status;
|
2016-01-05 04:05:59 +08:00
|
|
|
if (io_data->read && ret > 0) {
|
|
|
|
ret = copy_to_iter(data, ret, &io_data->data);
|
|
|
|
if (!ret)
|
|
|
|
ret = -EFAULT;
|
|
|
|
}
|
|
|
|
goto error_mutex;
|
|
|
|
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
} else {
|
|
|
|
req->buf = data;
|
|
|
|
req->length = data_len;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
io_data->buf = data;
|
|
|
|
io_data->ep = ep->ep;
|
|
|
|
io_data->req = req;
|
|
|
|
io_data->ffs = epfile->ffs;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
req->context = io_data;
|
|
|
|
req->complete = ffs_epfile_async_io_complete;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
|
|
|
|
if (unlikely(ret)) {
|
|
|
|
usb_ep_free_request(ep->ep, req);
|
|
|
|
goto error_lock;
|
2010-05-05 18:53:14 +08:00
|
|
|
}
|
|
|
|
|
2016-01-05 04:05:59 +08:00
|
|
|
ret = -EIOCBQUEUED;
|
|
|
|
/*
|
|
|
|
* Do not kfree the buffer in this function. It will be freed
|
|
|
|
* by ffs_user_copy_worker.
|
|
|
|
*/
|
|
|
|
data = NULL;
|
|
|
|
}
|
2014-03-10 16:33:37 +08:00
|
|
|
|
|
|
|
error_lock:
|
|
|
|
spin_unlock_irq(&epfile->ffs->eps_lock);
|
2016-01-05 04:05:59 +08:00
|
|
|
error_mutex:
|
2014-03-10 16:33:37 +08:00
|
|
|
mutex_unlock(&epfile->mutex);
|
2010-05-05 18:53:14 +08:00
|
|
|
error:
|
|
|
|
kfree(data);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
ffs_epfile_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct ffs_epfile *epfile = inode->i_private;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
file->private_data = epfile;
|
|
|
|
ffs_data_opened(epfile->ffs);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-02-10 17:42:44 +08:00
|
|
|
static int ffs_aio_cancel(struct kiocb *kiocb)
|
|
|
|
{
|
|
|
|
struct ffs_io_data *io_data = kiocb->private;
|
|
|
|
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
|
|
|
|
int value;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
spin_lock_irq(&epfile->ffs->eps_lock);
|
|
|
|
|
|
|
|
if (likely(io_data && io_data->ep && io_data->req))
|
|
|
|
value = usb_ep_dequeue(io_data->ep, io_data->req);
|
|
|
|
else
|
|
|
|
value = -EINVAL;
|
|
|
|
|
|
|
|
spin_unlock_irq(&epfile->ffs->eps_lock);
|
|
|
|
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
|
2014-02-10 17:42:44 +08:00
|
|
|
{
|
2015-02-01 12:55:39 +08:00
|
|
|
struct ffs_io_data io_data, *p = &io_data;
|
2015-02-01 12:42:34 +08:00
|
|
|
ssize_t res;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
if (!is_sync_kiocb(kiocb)) {
|
|
|
|
p = kmalloc(sizeof(io_data), GFP_KERNEL);
|
|
|
|
if (unlikely(!p))
|
|
|
|
return -ENOMEM;
|
|
|
|
p->aio = true;
|
|
|
|
} else {
|
|
|
|
p->aio = false;
|
|
|
|
}
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
p->read = false;
|
|
|
|
p->kiocb = kiocb;
|
|
|
|
p->data = *from;
|
|
|
|
p->mm = current->mm;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
kiocb->private = p;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2015-05-18 23:02:07 +08:00
|
|
|
if (p->aio)
|
|
|
|
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
res = ffs_epfile_io(kiocb->ki_filp, p);
|
|
|
|
if (res == -EIOCBQUEUED)
|
|
|
|
return res;
|
|
|
|
if (p->aio)
|
|
|
|
kfree(p);
|
|
|
|
else
|
|
|
|
*from = p->data;
|
2015-02-01 12:42:34 +08:00
|
|
|
return res;
|
2014-02-10 17:42:44 +08:00
|
|
|
}
|
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
|
2014-02-10 17:42:44 +08:00
|
|
|
{
|
2015-02-01 12:55:39 +08:00
|
|
|
struct ffs_io_data io_data, *p = &io_data;
|
2015-02-01 12:42:34 +08:00
|
|
|
ssize_t res;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
if (!is_sync_kiocb(kiocb)) {
|
|
|
|
p = kmalloc(sizeof(io_data), GFP_KERNEL);
|
|
|
|
if (unlikely(!p))
|
|
|
|
return -ENOMEM;
|
|
|
|
p->aio = true;
|
|
|
|
} else {
|
|
|
|
p->aio = false;
|
2014-02-10 17:42:44 +08:00
|
|
|
}
|
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
p->read = true;
|
|
|
|
p->kiocb = kiocb;
|
|
|
|
if (p->aio) {
|
|
|
|
p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
|
|
|
|
if (!p->to_free) {
|
|
|
|
kfree(p);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
p->data = *to;
|
|
|
|
p->to_free = NULL;
|
|
|
|
}
|
|
|
|
p->mm = current->mm;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
kiocb->private = p;
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2015-05-18 23:02:07 +08:00
|
|
|
if (p->aio)
|
|
|
|
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
|
2014-02-10 17:42:44 +08:00
|
|
|
|
2015-02-01 12:55:39 +08:00
|
|
|
res = ffs_epfile_io(kiocb->ki_filp, p);
|
|
|
|
if (res == -EIOCBQUEUED)
|
|
|
|
return res;
|
|
|
|
|
|
|
|
if (p->aio) {
|
|
|
|
kfree(p->to_free);
|
|
|
|
kfree(p);
|
|
|
|
} else {
|
|
|
|
*to = p->data;
|
2015-02-01 12:42:34 +08:00
|
|
|
}
|
|
|
|
return res;
|
2014-02-10 17:42:44 +08:00
|
|
|
}
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
static int
|
|
|
|
ffs_epfile_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
struct ffs_epfile *epfile = inode->i_private;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
ffs_data_closed(epfile->ffs);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static long ffs_epfile_ioctl(struct file *file, unsigned code,
|
|
|
|
unsigned long value)
|
|
|
|
{
|
|
|
|
struct ffs_epfile *epfile = file->private_data;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
spin_lock_irq(&epfile->ffs->eps_lock);
|
|
|
|
if (likely(epfile->ep)) {
|
|
|
|
switch (code) {
|
|
|
|
case FUNCTIONFS_FIFO_STATUS:
|
|
|
|
ret = usb_ep_fifo_status(epfile->ep->ep);
|
|
|
|
break;
|
|
|
|
case FUNCTIONFS_FIFO_FLUSH:
|
|
|
|
usb_ep_fifo_flush(epfile->ep->ep);
|
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
case FUNCTIONFS_CLEAR_HALT:
|
|
|
|
ret = usb_ep_clear_halt(epfile->ep->ep);
|
|
|
|
break;
|
|
|
|
case FUNCTIONFS_ENDPOINT_REVMAP:
|
|
|
|
ret = epfile->ep->num;
|
|
|
|
break;
|
2014-09-09 14:23:16 +08:00
|
|
|
case FUNCTIONFS_ENDPOINT_DESC:
|
|
|
|
{
|
|
|
|
int desc_idx;
|
|
|
|
struct usb_endpoint_descriptor *desc;
|
|
|
|
|
|
|
|
switch (epfile->ffs->gadget->speed) {
|
|
|
|
case USB_SPEED_SUPER:
|
|
|
|
desc_idx = 2;
|
|
|
|
break;
|
|
|
|
case USB_SPEED_HIGH:
|
|
|
|
desc_idx = 1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
desc_idx = 0;
|
|
|
|
}
|
|
|
|
desc = epfile->ep->descs[desc_idx];
|
|
|
|
|
|
|
|
spin_unlock_irq(&epfile->ffs->eps_lock);
|
|
|
|
ret = copy_to_user((void *)value, desc, sizeof(*desc));
|
|
|
|
if (ret)
|
|
|
|
ret = -EFAULT;
|
|
|
|
return ret;
|
|
|
|
}
|
2010-05-05 18:53:14 +08:00
|
|
|
default:
|
|
|
|
ret = -ENOTTY;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ret = -ENODEV;
|
|
|
|
}
|
|
|
|
spin_unlock_irq(&epfile->ffs->eps_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * File operations for per-endpoint files; one such file exists for every
 * endpoint described by the descriptors written to ep0.
 */
static const struct file_operations ffs_epfile_operations = {
	.llseek =	no_llseek,	/* endpoint streams are not seekable */

	.open =		ffs_epfile_open,
	.write_iter =	ffs_epfile_write_iter,
	.read_iter =	ffs_epfile_read_iter,
	.release =	ffs_epfile_release,
	.unlocked_ioctl =	ffs_epfile_ioctl,
};
|
|
|
|
|
|
|
|
|
|
|
|
/* File system and super block operations ***********************************/
|
|
|
|
|
|
|
|
/*
|
2010-11-12 21:29:28 +08:00
|
|
|
* Mounting the file system creates a controller file, used first for
|
2010-05-05 18:53:14 +08:00
|
|
|
* function configuration then later for event monitoring.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static struct inode *__must_check
|
|
|
|
ffs_sb_make_inode(struct super_block *sb, void *data,
|
|
|
|
const struct file_operations *fops,
|
|
|
|
const struct inode_operations *iops,
|
|
|
|
struct ffs_file_perms *perms)
|
|
|
|
{
|
|
|
|
struct inode *inode;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
inode = new_inode(sb);
|
|
|
|
|
|
|
|
if (likely(inode)) {
|
|
|
|
struct timespec current_time = CURRENT_TIME;
|
|
|
|
|
2010-10-27 11:19:36 +08:00
|
|
|
inode->i_ino = get_next_ino();
|
2010-05-05 18:53:14 +08:00
|
|
|
inode->i_mode = perms->mode;
|
|
|
|
inode->i_uid = perms->uid;
|
|
|
|
inode->i_gid = perms->gid;
|
|
|
|
inode->i_atime = current_time;
|
|
|
|
inode->i_mtime = current_time;
|
|
|
|
inode->i_ctime = current_time;
|
|
|
|
inode->i_private = data;
|
|
|
|
if (fops)
|
|
|
|
inode->i_fop = fops;
|
|
|
|
if (iops)
|
|
|
|
inode->i_op = iops;
|
|
|
|
}
|
|
|
|
|
|
|
|
return inode;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create "regular" file */
|
2014-09-04 01:32:19 +08:00
|
|
|
static struct dentry *ffs_sb_create_file(struct super_block *sb,
|
2010-05-05 18:53:14 +08:00
|
|
|
const char *name, void *data,
|
2014-09-04 01:32:19 +08:00
|
|
|
const struct file_operations *fops)
|
2010-05-05 18:53:14 +08:00
|
|
|
{
|
|
|
|
struct ffs_data *ffs = sb->s_fs_info;
|
|
|
|
struct dentry *dentry;
|
|
|
|
struct inode *inode;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
dentry = d_alloc_name(sb->s_root, name);
|
|
|
|
if (unlikely(!dentry))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
|
|
|
|
if (unlikely(!inode)) {
|
|
|
|
dput(dentry);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
d_add(dentry, inode);
|
2014-09-04 01:32:19 +08:00
|
|
|
return dentry;
|
2010-05-05 18:53:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Super block */
|
|
|
|
/* Minimal super-block operations: libfs statfs, inodes freed on last use. */
static const struct super_operations ffs_sb_operations = {
	.statfs =	simple_statfs,
	.drop_inode =	generic_delete_inode,
};
|
|
|
|
|
|
|
|
/*
 * Mount-time parameters parsed from the mount options, passed through to
 * ffs_sb_fill() when the super block is set up.
 */
struct ffs_sb_fill_data {
	struct ffs_file_perms perms;	/* uid/gid/mode applied to created files */
	umode_t root_mode;		/* mode for the mount's root directory inode */
	const char *dev_name;		/* FunctionFS device name (mount source) */
	bool no_disconnect;		/* "no_disconnect=1" option: defer function
					 * disconnect until ep0 reopen or unmount */
	struct ffs_data *ffs_data;	/* pre-allocated instance; consumed (and
					 * cleared) by ffs_sb_fill() */
};
|
|
|
|
|
|
|
|
static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
|
|
|
|
{
|
|
|
|
struct ffs_sb_fill_data *data = _data;
|
|
|
|
struct inode *inode;
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
struct ffs_data *ffs = data->ffs_data;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
ffs->sb = sb;
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
data->ffs_data = NULL;
|
2010-05-05 18:53:14 +08:00
|
|
|
sb->s_fs_info = ffs;
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 20:29:47 +08:00
|
|
|
sb->s_blocksize = PAGE_SIZE;
|
|
|
|
sb->s_blocksize_bits = PAGE_SHIFT;
|
2010-05-05 18:53:14 +08:00
|
|
|
sb->s_magic = FUNCTIONFS_MAGIC;
|
|
|
|
sb->s_op = &ffs_sb_operations;
|
|
|
|
sb->s_time_gran = 1;
|
|
|
|
|
|
|
|
/* Root inode */
|
|
|
|
data->perms.mode = data->root_mode;
|
|
|
|
inode = ffs_sb_make_inode(sb, NULL,
|
|
|
|
&simple_dir_operations,
|
|
|
|
&simple_dir_inode_operations,
|
|
|
|
&data->perms);
|
2012-01-09 11:15:13 +08:00
|
|
|
sb->s_root = d_make_root(inode);
|
|
|
|
if (unlikely(!sb->s_root))
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
return -ENOMEM;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
/* EP0 file */
|
|
|
|
if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
|
2014-09-04 01:32:19 +08:00
|
|
|
&ffs_ep0_operations)))
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
return -ENOMEM;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Parse the comma-separated mount option string @opts in place and fill
 * @data with the result.  Recognised options (all take a "name=value"
 * form, value parsed as an unsigned long by kstrtoul with base 0, so
 * octal/hex are accepted):
 *
 *   no_disconnect=<bool>  keep the function visible to the host after the
 *                         userspace daemon closes the endpoint files
 *   rmode=<mode>          permission bits of the root directory
 *   fmode=<mode>          permission bits of the endpoint files
 *   mode=<mode>           shorthand setting both rmode and fmode
 *   uid=<uid> / gid=<gid> owner of the created inodes; mapped through the
 *                         current user namespace and rejected if unmapped
 *
 * Note: @opts is modified (',' and '=' separators are overwritten with
 * NUL bytes) — callers must pass a writable buffer.
 *
 * Returns 0 on success (including a NULL/empty option string) or -EINVAL
 * on any malformed or unknown option.
 */
static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
{
	ENTER();

	/* No options at all is valid: keep the defaults in *data. */
	if (!opts || !*opts)
		return 0;

	for (;;) {
		unsigned long value;
		char *eq, *comma;

		/* Option limit */
		comma = strchr(opts, ',');
		if (comma)
			*comma = 0;

		/* Value limit */
		eq = strchr(opts, '=');
		if (unlikely(!eq)) {
			pr_err("'=' missing in %s\n", opts);
			return -EINVAL;
		}
		*eq = 0;

		/* Parse value */
		if (kstrtoul(eq + 1, 0, &value)) {
			pr_err("%s: invalid value: %s\n", opts, eq + 1);
			return -EINVAL;
		}

		/*
		 * Interpret option.  Dispatch on the option-name length
		 * first, then memcmp against the candidates of that
		 * length — avoids a strcmp chain.
		 */
		switch (eq - opts) {
		case 13:
			if (!memcmp(opts, "no_disconnect", 13))
				data->no_disconnect = !!value;
			else
				goto invalid;
			break;
		case 5:
			/* Directories never get write bits (0555 mask). */
			if (!memcmp(opts, "rmode", 5))
				data->root_mode = (value & 0555) | S_IFDIR;
			else if (!memcmp(opts, "fmode", 5))
				data->perms.mode = (value & 0666) | S_IFREG;
			else
				goto invalid;
			break;

		case 4:
			/* "mode" sets both the root dir and file modes. */
			if (!memcmp(opts, "mode", 4)) {
				data->root_mode = (value & 0555) | S_IFDIR;
				data->perms.mode = (value & 0666) | S_IFREG;
			} else {
				goto invalid;
			}
			break;

		case 3:
			/*
			 * uid/gid are given as numbers in the caller's
			 * user namespace; reject values that have no
			 * kernel-internal mapping there.
			 */
			if (!memcmp(opts, "uid", 3)) {
				data->perms.uid = make_kuid(current_user_ns(), value);
				if (!uid_valid(data->perms.uid)) {
					pr_err("%s: unmapped value: %lu\n", opts, value);
					return -EINVAL;
				}
			} else if (!memcmp(opts, "gid", 3)) {
				data->perms.gid = make_kgid(current_user_ns(), value);
				if (!gid_valid(data->perms.gid)) {
					pr_err("%s: unmapped value: %lu\n", opts, value);
					return -EINVAL;
				}
			} else {
				goto invalid;
			}
			break;

		default:
invalid:
			pr_err("%s: invalid option\n", opts);
			return -EINVAL;
		}

		/* Next iteration */
		if (!comma)
			break;
		opts = comma + 1;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/* "mount -t functionfs dev_name /dev/function" ends up here */
|
|
|
|
|
2010-07-25 05:48:30 +08:00
|
|
|
static struct dentry *
|
|
|
|
ffs_fs_mount(struct file_system_type *t, int flags,
|
|
|
|
const char *dev_name, void *opts)
|
2010-05-05 18:53:14 +08:00
|
|
|
{
|
|
|
|
struct ffs_sb_fill_data data = {
|
|
|
|
.perms = {
|
|
|
|
.mode = S_IFREG | 0600,
|
2012-06-14 16:19:23 +08:00
|
|
|
.uid = GLOBAL_ROOT_UID,
|
|
|
|
.gid = GLOBAL_ROOT_GID,
|
2010-05-05 18:53:14 +08:00
|
|
|
},
|
|
|
|
.root_mode = S_IFDIR | 0500,
|
usb: gadget: f_fs: add "no_disconnect" mode
Since we can compose gadgets from many functions, there is the problem
related to gadget breakage while FunctionFS daemon being closed. FFS
function is userspace code so there is no way to know when it will close
files (it doesn't matter what is the reason of this situation, it can
be daemon logic, program breakage, process kill or any other). So when
we have another function in gadget which, for example, sends some amount
of data, does some software update or implements some real-time functionality,
we may want to keep the gadget connected despite FFS function is no longer
functional.
We can't just remove one of functions from gadget since it has been
enumerated, so the only way to keep entire gadget working is to make
broken FFS function deactivated but still visible to host. For this
purpose this patch introduces "no_disconnect" mode. It can be enabled
by setting mount option "no_disconnect=1", and results in deferring
function disconnect to the moment of reopen ep0 file or filesystem
unmount. After closing all endpoint files, FunctionFS is set to state
FFS_DEACTIVATED.
When ffs->state == FFS_DEACTIVATED:
- function is still bound and visible to host,
- setup requests are automatically stalled,
- transfers on other endpoints are refused,
- epfiles, except ep0, are deleted from the filesystem,
- opening ep0 causes the function to be closed, and then FunctionFS
is ready for descriptors and string write,
- altsetting change causes the function to be closed - we want to keep
function alive until other functions are potentially used; altsetting
change means that another configuration is being selected or USB cable
was unplugged, which indicates that we don't need to stay longer in
FFS_DEACTIVATED state
- unmounting of the FunctionFS instance causes the function to be closed.
Tested-by: David Cohen <david.a.cohen@linux.intel.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
2014-12-18 16:55:10 +08:00
|
|
|
.no_disconnect = false,
|
2010-05-05 18:53:14 +08:00
|
|
|
};
|
2012-05-14 21:51:52 +08:00
|
|
|
struct dentry *rv;
|
2010-05-05 18:53:14 +08:00
|
|
|
int ret;
|
2012-05-14 21:51:52 +08:00
|
|
|
void *ffs_dev;
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
struct ffs_data *ffs;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
ret = ffs_fs_parse_opts(&data, opts);
|
|
|
|
if (unlikely(ret < 0))
|
2010-07-25 05:48:30 +08:00
|
|
|
return ERR_PTR(ret);
|
2010-05-05 18:53:14 +08:00
|
|
|
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
ffs = ffs_data_new();
|
|
|
|
if (unlikely(!ffs))
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
ffs->file_perms = data.perms;
|
usb: gadget: f_fs: add "no_disconnect" mode
Since we can compose gadgets from many functions, there is the problem
related to gadget breakage while FunctionFS daemon being closed. FFS
function is userspace code so there is no way to know when it will close
files (it doesn't matter what is the reason of this situation, it can
be daemon logic, program breakage, process kill or any other). So when
we have another function in gadget which, for example, sends some amount
of data, does some software update or implements some real-time functionality,
we may want to keep the gadget connected despite FFS function is no longer
functional.
We can't just remove one of functions from gadget since it has been
enumerated, so the only way to keep entire gadget working is to make
broken FFS function deactivated but still visible to host. For this
purpose this patch introduces "no_disconnect" mode. It can be enabled
by setting mount option "no_disconnect=1", and results with defering
function disconnect to the moment of reopen ep0 file or filesystem
unmount. After closing all endpoint files, FunctionFS is set to state
FFS_DEACTIVATED.
When ffs->state == FFS_DEACTIVATED:
- function is still bound and visible to host,
- setup requests are automatically stalled,
- transfers on other endpoints are refused,
- epfiles, except ep0, are deleted from the filesystem,
- opening ep0 causes the function to be closed, and then FunctionFS
is ready for descriptors and string write,
- altsetting change causes the function to be closed - we want to keep
function alive until another functions are potentialy used, altsetting
change means that another configuration is being selected or USB cable
was unplugged, which indicates that we don't need to stay longer in
FFS_DEACTIVATED state
- unmounting of the FunctionFS instance causes the function to be closed.
Tested-by: David Cohen <david.a.cohen@linux.intel.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
2014-12-18 16:55:10 +08:00
|
|
|
ffs->no_disconnect = data.no_disconnect;
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
|
|
|
|
ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
|
|
|
|
if (unlikely(!ffs->dev_name)) {
|
|
|
|
ffs_data_put(ffs);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
2013-12-03 22:15:32 +08:00
|
|
|
ffs_dev = ffs_acquire_dev(dev_name);
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
if (IS_ERR(ffs_dev)) {
|
|
|
|
ffs_data_put(ffs);
|
|
|
|
return ERR_CAST(ffs_dev);
|
|
|
|
}
|
|
|
|
ffs->private_data = ffs_dev;
|
|
|
|
data.ffs_data = ffs;
|
2012-05-14 21:51:52 +08:00
|
|
|
|
|
|
|
rv = mount_nodev(t, flags, &data, ffs_sb_fill);
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
if (IS_ERR(rv) && data.ffs_data) {
|
2013-12-03 22:15:32 +08:00
|
|
|
ffs_release_dev(data.ffs_data);
|
USB: Fix breakage in ffs_fs_mount()
There's a bunch of failure exits in ffs_fs_mount() with
seriously broken recovery logics. Most of that appears to stem
from misunderstanding of the ->kill_sb() semantics; unlike
->put_super() it is called for *all* superblocks of given type,
no matter how (in)complete the setup had been. ->put_super()
is called only if ->s_root is not NULL; any failure prior to
setting ->s_root will have the call of ->put_super() skipped.
->kill_sb(), OTOH, awaits every superblock that has come from
sget().
Current behaviour of ffs_fs_mount():
We have struct ffs_sb_fill_data data on stack there. We do
ffs_dev = functionfs_acquire_dev_callback(dev_name);
and store that in data.private_data. Then we call mount_nodev(),
passing it ffs_sb_fill() as a callback. That will either fail
outright, or manage to call ffs_sb_fill(). There we allocate an
instance of struct ffs_data, slap the value of ffs_dev (picked
from data.private_data) into ffs->private_data and overwrite
data.private_data by storing ffs into an overlapping member
(data.ffs_data). Then we store ffs into sb->s_fs_info and attempt
to set the rest of the things up (root inode, root dentry, then
create /ep0 there). Any of those might fail. Should that
happen, we get ffs_fs_kill_sb() called before mount_nodev()
returns. If mount_nodev() fails for any reason whatsoever,
we proceed to
functionfs_release_dev_callback(data.ffs_data);
That's broken in a lot of ways. Suppose the thing has failed in
allocation of e.g. root inode or dentry. We have
functionfs_release_dev_callback(ffs);
ffs_data_put(ffs);
done by ffs_fs_kill_sb() (ffs accessed via sb->s_fs_info), followed by
functionfs_release_dev_callback(ffs);
from ffs_fs_mount() (via data.ffs_data). Note that the second
functionfs_release_dev_callback() has every chance to be done to freed memory.
Suppose we fail *before* root inode allocation. What happens then?
ffs_fs_kill_sb() doesn't do anything to ffs (it's either not called at all,
or it doesn't have a pointer to ffs stored in sb->s_fs_info). And
functionfs_release_dev_callback(data.ffs_data);
is called by ffs_fs_mount(), but here we are in nasal daemon country - we
are reading from a member of union we'd never stored into. In practice,
we'll get what we used to store into the overlapping field, i.e. ffs_dev.
And then we get screwed, since we treat it (struct gfs_ffs_obj * in
disguise, returned by functionfs_acquire_dev_callback()) as struct
ffs_data *, pick what would've been ffs_data ->private_data from it
(*well* past the actual end of the struct gfs_ffs_obj - struct ffs_data
is much bigger) and poke in whatever it points to.
FWIW, there's a minor leak on top of all that in case if ffs_sb_fill()
fails on kstrdup() - ffs is obviously forgotten.
The thing is, there is no point in playing all those games with union.
Just allocate and initialize ffs_data *before* calling mount_nodev() and
pass a pointer to it via data.ffs_data. And once it's stored in
sb->s_fs_info, clear data.ffs_data, so that ffs_fs_mount() knows that
it doesn't need to kill the sucker manually - from that point on
we'll have it done by ->kill_sb().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: stable <stable@vger.kernel.org> # 3.3+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-09-21 00:14:21 +08:00
|
|
|
ffs_data_put(data.ffs_data);
|
|
|
|
}
|
2012-05-14 21:51:52 +08:00
|
|
|
return rv;
|
2010-05-05 18:53:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
ffs_fs_kill_sb(struct super_block *sb)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
kill_litter_super(sb);
|
2012-05-14 21:51:52 +08:00
|
|
|
if (sb->s_fs_info) {
|
2013-12-03 22:15:32 +08:00
|
|
|
ffs_release_dev(sb->s_fs_info);
|
usb: gadget: f_fs: add "no_disconnect" mode
Since we can compose gadgets from many functions, there is the problem
related to gadget breakage while FunctionFS daemon being closed. FFS
function is userspace code so there is no way to know when it will close
files (it doesn't matter what is the reason of this situation, it can
be daemon logic, program breakage, process kill or any other). So when
we have another function in gadget which, for example, sends some amount
of data, does some software update or implements some real-time functionality,
we may want to keep the gadget connected despite FFS function is no longer
functional.
We can't just remove one of functions from gadget since it has been
enumerated, so the only way to keep entire gadget working is to make
broken FFS function deactivated but still visible to host. For this
purpose this patch introduces "no_disconnect" mode. It can be enabled
by setting mount option "no_disconnect=1", and results with defering
function disconnect to the moment of reopen ep0 file or filesystem
unmount. After closing all endpoint files, FunctionFS is set to state
FFS_DEACTIVATED.
When ffs->state == FFS_DEACTIVATED:
- function is still bound and visible to host,
- setup requests are automatically stalled,
- transfers on other endpoints are refused,
- epfiles, except ep0, are deleted from the filesystem,
- opening ep0 causes the function to be closed, and then FunctionFS
is ready for descriptors and string write,
- altsetting change causes the function to be closed - we want to keep
function alive until another functions are potentialy used, altsetting
change means that another configuration is being selected or USB cable
was unplugged, which indicates that we don't need to stay longer in
FFS_DEACTIVATED state
- unmounting of the FunctionFS instance causes the function to be closed.
Tested-by: David Cohen <david.a.cohen@linux.intel.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
2014-12-18 16:55:10 +08:00
|
|
|
ffs_data_closed(sb->s_fs_info);
|
2012-01-09 04:38:27 +08:00
|
|
|
ffs_data_put(sb->s_fs_info);
|
2012-05-14 21:51:52 +08:00
|
|
|
}
|
2010-05-05 18:53:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct file_system_type ffs_fs_type = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.name = "functionfs",
|
2010-07-25 05:48:30 +08:00
|
|
|
.mount = ffs_fs_mount,
|
2010-05-05 18:53:14 +08:00
|
|
|
.kill_sb = ffs_fs_kill_sb,
|
|
|
|
};
|
2013-03-03 11:39:14 +08:00
|
|
|
MODULE_ALIAS_FS("functionfs");
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
|
|
|
|
/* Driver's main init/cleanup functions *************************************/
|
|
|
|
|
|
|
|
static int functionfs_init(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
ret = register_filesystem(&ffs_fs_type);
|
|
|
|
if (likely(!ret))
|
2010-11-18 00:09:47 +08:00
|
|
|
pr_info("file system registered\n");
|
2010-05-05 18:53:14 +08:00
|
|
|
else
|
2010-11-18 00:09:47 +08:00
|
|
|
pr_err("failed registering file system (%d)\n", ret);
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void functionfs_cleanup(void)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
2010-11-18 00:09:47 +08:00
|
|
|
pr_info("unloading\n");
|
2010-05-05 18:53:14 +08:00
|
|
|
unregister_filesystem(&ffs_fs_type);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* ffs_data and ffs_function construction and destruction code **************/
|
|
|
|
|
|
|
|
/* Forward declarations: clear frees resources, reset additionally returns
 * the ffs_data to its freshly-allocated state. */
static void ffs_data_clear(struct ffs_data *ffs);
static void ffs_data_reset(struct ffs_data *ffs);
|
|
|
|
|
|
|
|
static void ffs_data_get(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
atomic_inc(&ffs->ref);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ffs_data_opened(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
atomic_inc(&ffs->ref);
|
usb: gadget: f_fs: add "no_disconnect" mode
Since we can compose gadgets from many functions, there is the problem
related to gadget breakage while FunctionFS daemon being closed. FFS
function is userspace code so there is no way to know when it will close
files (it doesn't matter what is the reason of this situation, it can
be daemon logic, program breakage, process kill or any other). So when
we have another function in gadget which, for example, sends some amount
of data, does some software update or implements some real-time functionality,
we may want to keep the gadget connected despite FFS function is no longer
functional.
We can't just remove one of functions from gadget since it has been
enumerated, so the only way to keep entire gadget working is to make
broken FFS function deactivated but still visible to host. For this
purpose this patch introduces "no_disconnect" mode. It can be enabled
by setting mount option "no_disconnect=1", and results with defering
function disconnect to the moment of reopen ep0 file or filesystem
unmount. After closing all endpoint files, FunctionFS is set to state
FFS_DEACTIVATED.
When ffs->state == FFS_DEACTIVATED:
- function is still bound and visible to host,
- setup requests are automatically stalled,
- transfers on other endpoints are refused,
- epfiles, except ep0, are deleted from the filesystem,
- opening ep0 causes the function to be closed, and then FunctionFS
is ready for descriptors and string write,
- altsetting change causes the function to be closed - we want to keep
function alive until another functions are potentialy used, altsetting
change means that another configuration is being selected or USB cable
was unplugged, which indicates that we don't need to stay longer in
FFS_DEACTIVATED state
- unmounting of the FunctionFS instance causes the function to be closed.
Tested-by: David Cohen <david.a.cohen@linux.intel.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
2014-12-18 16:55:10 +08:00
|
|
|
if (atomic_add_return(1, &ffs->opened) == 1 &&
|
|
|
|
ffs->state == FFS_DEACTIVATED) {
|
|
|
|
ffs->state = FFS_CLOSING;
|
|
|
|
ffs_data_reset(ffs);
|
|
|
|
}
|
2010-05-05 18:53:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ffs_data_put(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
if (unlikely(atomic_dec_and_test(&ffs->ref))) {
|
2010-11-18 00:09:47 +08:00
|
|
|
pr_info("%s(): freeing\n", __func__);
|
2010-05-05 18:53:14 +08:00
|
|
|
ffs_data_clear(ffs);
|
2012-03-17 03:01:02 +08:00
|
|
|
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
|
2010-05-05 18:53:14 +08:00
|
|
|
waitqueue_active(&ffs->ep0req_completion.wait));
|
2012-05-14 21:51:52 +08:00
|
|
|
kfree(ffs->dev_name);
|
2010-05-05 18:53:14 +08:00
|
|
|
kfree(ffs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ffs_data_closed(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
if (atomic_dec_and_test(&ffs->opened)) {
|
usb: gadget: f_fs: add "no_disconnect" mode
Since we can compose gadgets from many functions, there is the problem
related to gadget breakage while FunctionFS daemon being closed. FFS
function is userspace code so there is no way to know when it will close
files (it doesn't matter what is the reason of this situation, it can
be daemon logic, program breakage, process kill or any other). So when
we have another function in gadget which, for example, sends some amount
of data, does some software update or implements some real-time functionality,
we may want to keep the gadget connected despite FFS function is no longer
functional.
We can't just remove one of functions from gadget since it has been
enumerated, so the only way to keep entire gadget working is to make
broken FFS function deactivated but still visible to host. For this
purpose this patch introduces "no_disconnect" mode. It can be enabled
by setting mount option "no_disconnect=1", and results with defering
function disconnect to the moment of reopen ep0 file or filesystem
unmount. After closing all endpoint files, FunctionFS is set to state
FFS_DEACTIVATED.
When ffs->state == FFS_DEACTIVATED:
- function is still bound and visible to host,
- setup requests are automatically stalled,
- transfers on other endpoints are refused,
- epfiles, except ep0, are deleted from the filesystem,
- opening ep0 causes the function to be closed, and then FunctionFS
is ready for descriptors and string write,
- altsetting change causes the function to be closed - we want to keep
function alive until another functions are potentialy used, altsetting
change means that another configuration is being selected or USB cable
was unplugged, which indicates that we don't need to stay longer in
FFS_DEACTIVATED state
- unmounting of the FunctionFS instance causes the function to be closed.
Tested-by: David Cohen <david.a.cohen@linux.intel.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
2014-12-18 16:55:10 +08:00
|
|
|
if (ffs->no_disconnect) {
|
|
|
|
ffs->state = FFS_DEACTIVATED;
|
|
|
|
if (ffs->epfiles) {
|
|
|
|
ffs_epfiles_destroy(ffs->epfiles,
|
|
|
|
ffs->eps_count);
|
|
|
|
ffs->epfiles = NULL;
|
|
|
|
}
|
|
|
|
if (ffs->setup_state == FFS_SETUP_PENDING)
|
|
|
|
__ffs_ep0_stall(ffs);
|
|
|
|
} else {
|
|
|
|
ffs->state = FFS_CLOSING;
|
|
|
|
ffs_data_reset(ffs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (atomic_read(&ffs->opened) < 0) {
|
2010-05-05 18:53:14 +08:00
|
|
|
ffs->state = FFS_CLOSING;
|
|
|
|
ffs_data_reset(ffs);
|
|
|
|
}
|
|
|
|
|
|
|
|
ffs_data_put(ffs);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ffs_data *ffs_data_new(void)
|
|
|
|
{
|
|
|
|
struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
|
|
|
|
if (unlikely(!ffs))
|
2013-12-13 02:15:43 +08:00
|
|
|
return NULL;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
atomic_set(&ffs->ref, 1);
|
|
|
|
atomic_set(&ffs->opened, 0);
|
|
|
|
ffs->state = FFS_READ_DESCRIPTORS;
|
|
|
|
mutex_init(&ffs->mutex);
|
|
|
|
spin_lock_init(&ffs->eps_lock);
|
|
|
|
init_waitqueue_head(&ffs->ev.waitq);
|
|
|
|
init_completion(&ffs->ep0req_completion);
|
|
|
|
|
|
|
|
/* XXX REVISIT need to update it in some places, or do we? */
|
|
|
|
ffs->ev.can_stall = 1;
|
|
|
|
|
|
|
|
return ffs;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ffs_data_clear(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
2015-05-22 23:25:18 +08:00
|
|
|
ffs_closed(ffs);
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
BUG_ON(ffs->gadget);
|
|
|
|
|
|
|
|
if (ffs->epfiles)
|
|
|
|
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
|
|
|
|
|
2015-01-23 20:41:01 +08:00
|
|
|
if (ffs->ffs_eventfd)
|
|
|
|
eventfd_ctx_put(ffs->ffs_eventfd);
|
|
|
|
|
2014-02-28 19:20:23 +08:00
|
|
|
kfree(ffs->raw_descs_data);
|
2010-05-05 18:53:14 +08:00
|
|
|
kfree(ffs->raw_strings);
|
|
|
|
kfree(ffs->stringtabs);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ffs_data_reset(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
ffs_data_clear(ffs);
|
|
|
|
|
|
|
|
ffs->epfiles = NULL;
|
2014-02-28 19:20:23 +08:00
|
|
|
ffs->raw_descs_data = NULL;
|
2010-05-05 18:53:14 +08:00
|
|
|
ffs->raw_descs = NULL;
|
|
|
|
ffs->raw_strings = NULL;
|
|
|
|
ffs->stringtabs = NULL;
|
|
|
|
|
|
|
|
ffs->raw_descs_length = 0;
|
|
|
|
ffs->fs_descs_count = 0;
|
|
|
|
ffs->hs_descs_count = 0;
|
2014-02-28 19:20:22 +08:00
|
|
|
ffs->ss_descs_count = 0;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
ffs->strings_count = 0;
|
|
|
|
ffs->interfaces_count = 0;
|
|
|
|
ffs->eps_count = 0;
|
|
|
|
|
|
|
|
ffs->ev.count = 0;
|
|
|
|
|
|
|
|
ffs->state = FFS_READ_DESCRIPTORS;
|
|
|
|
ffs->setup_state = FFS_NO_SETUP;
|
|
|
|
ffs->flags = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
|
|
|
|
{
|
2010-06-16 18:08:00 +08:00
|
|
|
struct usb_gadget_strings **lang;
|
|
|
|
int first_id;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
if (WARN_ON(ffs->state != FFS_ACTIVE
|
|
|
|
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
|
|
|
|
return -EBADFD;
|
|
|
|
|
2010-06-16 18:08:00 +08:00
|
|
|
first_id = usb_string_ids_n(cdev, ffs->strings_count);
|
|
|
|
if (unlikely(first_id < 0))
|
|
|
|
return first_id;
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
|
|
|
|
if (unlikely(!ffs->ep0req))
|
|
|
|
return -ENOMEM;
|
|
|
|
ffs->ep0req->complete = ffs_ep0_complete;
|
|
|
|
ffs->ep0req->context = ffs;
|
|
|
|
|
2010-06-16 18:08:00 +08:00
|
|
|
lang = ffs->stringtabs;
|
2014-06-17 23:47:41 +08:00
|
|
|
if (lang) {
|
|
|
|
for (; *lang; ++lang) {
|
|
|
|
struct usb_string *str = (*lang)->strings;
|
|
|
|
int id = first_id;
|
|
|
|
for (; str->s; ++id, ++str)
|
|
|
|
str->id = id;
|
|
|
|
}
|
2010-05-05 18:53:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ffs->gadget = cdev->gadget;
|
2010-06-16 18:08:00 +08:00
|
|
|
ffs_data_get(ffs);
|
2010-05-05 18:53:14 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void functionfs_unbind(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
if (!WARN_ON(!ffs->gadget)) {
|
|
|
|
usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
|
|
|
|
ffs->ep0req = NULL;
|
|
|
|
ffs->gadget = NULL;
|
2012-03-12 19:55:41 +08:00
|
|
|
clear_bit(FFS_FL_BOUND, &ffs->flags);
|
2013-08-23 16:16:15 +08:00
|
|
|
ffs_data_put(ffs);
|
2010-05-05 18:53:14 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ffs_epfiles_create(struct ffs_data *ffs)
|
|
|
|
{
|
|
|
|
struct ffs_epfile *epfile, *epfiles;
|
|
|
|
unsigned i, count;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
count = ffs->eps_count;
|
2011-11-30 05:08:00 +08:00
|
|
|
epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
|
2010-05-05 18:53:14 +08:00
|
|
|
if (!epfiles)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
epfile = epfiles;
|
|
|
|
for (i = 1; i <= count; ++i, ++epfile) {
|
|
|
|
epfile->ffs = ffs;
|
|
|
|
mutex_init(&epfile->mutex);
|
|
|
|
init_waitqueue_head(&epfile->wait);
|
2014-09-09 14:23:17 +08:00
|
|
|
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
|
2015-01-27 03:40:21 +08:00
|
|
|
sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
|
2014-09-09 14:23:17 +08:00
|
|
|
else
|
2015-01-27 03:40:21 +08:00
|
|
|
sprintf(epfile->name, "ep%u", i);
|
|
|
|
epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
|
2014-09-04 01:32:19 +08:00
|
|
|
epfile,
|
|
|
|
&ffs_epfile_operations);
|
|
|
|
if (unlikely(!epfile->dentry)) {
|
2010-05-05 18:53:14 +08:00
|
|
|
ffs_epfiles_destroy(epfiles, i - 1);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ffs->epfiles = epfiles;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
|
|
|
|
{
|
|
|
|
struct ffs_epfile *epfile = epfiles;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
for (; count; --count, ++epfile) {
|
|
|
|
BUG_ON(mutex_is_locked(&epfile->mutex) ||
|
|
|
|
waitqueue_active(&epfile->wait));
|
|
|
|
if (epfile->dentry) {
|
|
|
|
d_delete(epfile->dentry);
|
|
|
|
dput(epfile->dentry);
|
|
|
|
epfile->dentry = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(epfiles);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ffs_func_eps_disable(struct ffs_function *func)
|
|
|
|
{
|
|
|
|
struct ffs_ep *ep = func->eps;
|
|
|
|
struct ffs_epfile *epfile = func->ffs->epfiles;
|
|
|
|
unsigned count = func->ffs->eps_count;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&func->ffs->eps_lock, flags);
|
|
|
|
do {
|
|
|
|
/* pending requests get nuked */
|
|
|
|
if (likely(ep->ep))
|
|
|
|
usb_ep_disable(ep->ep);
|
|
|
|
++ep;
|
usb: gadget: f_fs: add "no_disconnect" mode
Since we can compose gadgets from many functions, there is the problem
related to gadget breakage while FunctionFS daemon being closed. FFS
function is userspace code so there is no way to know when it will close
files (it doesn't matter what is the reason of this situation, it can
be daemon logic, program breakage, process kill or any other). So when
we have another function in gadget which, for example, sends some amount
of data, does some software update or implements some real-time functionality,
we may want to keep the gadget connected despite FFS function is no longer
functional.
We can't just remove one of functions from gadget since it has been
enumerated, so the only way to keep entire gadget working is to make
broken FFS function deactivated but still visible to host. For this
purpose this patch introduces "no_disconnect" mode. It can be enabled
by setting mount option "no_disconnect=1", and results with defering
function disconnect to the moment of reopen ep0 file or filesystem
unmount. After closing all endpoint files, FunctionFS is set to state
FFS_DEACTIVATED.
When ffs->state == FFS_DEACTIVATED:
- function is still bound and visible to host,
- setup requests are automatically stalled,
- transfers on other endpoints are refused,
- epfiles, except ep0, are deleted from the filesystem,
- opening ep0 causes the function to be closed, and then FunctionFS
is ready for descriptors and string write,
- altsetting change causes the function to be closed - we want to keep
function alive until another functions are potentialy used, altsetting
change means that another configuration is being selected or USB cable
was unplugged, which indicates that we don't need to stay longer in
FFS_DEACTIVATED state
- unmounting of the FunctionFS instance causes the function to be closed.
Tested-by: David Cohen <david.a.cohen@linux.intel.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
2014-12-18 16:55:10 +08:00
|
|
|
|
|
|
|
if (epfile) {
|
|
|
|
epfile->ep = NULL;
|
|
|
|
++epfile;
|
|
|
|
}
|
2010-05-05 18:53:14 +08:00
|
|
|
} while (--count);
|
|
|
|
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ffs_func_eps_enable(struct ffs_function *func)
|
|
|
|
{
|
|
|
|
struct ffs_data *ffs = func->ffs;
|
|
|
|
struct ffs_ep *ep = func->eps;
|
|
|
|
struct ffs_epfile *epfile = ffs->epfiles;
|
|
|
|
unsigned count = ffs->eps_count;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&func->ffs->eps_lock, flags);
|
|
|
|
do {
|
|
|
|
struct usb_endpoint_descriptor *ds;
|
2014-02-28 19:20:22 +08:00
|
|
|
int desc_idx;
|
|
|
|
|
|
|
|
if (ffs->gadget->speed == USB_SPEED_SUPER)
|
|
|
|
desc_idx = 2;
|
|
|
|
else if (ffs->gadget->speed == USB_SPEED_HIGH)
|
|
|
|
desc_idx = 1;
|
|
|
|
else
|
|
|
|
desc_idx = 0;
|
|
|
|
|
|
|
|
/* fall-back to lower speed if desc missing for current speed */
|
|
|
|
do {
|
|
|
|
ds = ep->descs[desc_idx];
|
|
|
|
} while (!ds && --desc_idx >= 0);
|
|
|
|
|
|
|
|
if (!ds) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
ep->ep->driver_data = ep;
|
2011-06-28 21:33:48 +08:00
|
|
|
ep->ep->desc = ds;
|
|
|
|
ret = usb_ep_enable(ep->ep);
|
2010-05-05 18:53:14 +08:00
|
|
|
if (likely(!ret)) {
|
|
|
|
epfile->ep = ep;
|
|
|
|
epfile->in = usb_endpoint_dir_in(ds);
|
|
|
|
epfile->isoc = usb_endpoint_xfer_isoc(ds);
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
wake_up(&epfile->wait);
|
|
|
|
|
|
|
|
++ep;
|
|
|
|
++epfile;
|
|
|
|
} while (--count);
|
|
|
|
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Parsing and building descriptors and strings *****************************/
|
|
|
|
|
2010-11-12 21:29:28 +08:00
|
|
|
/*
|
|
|
|
* This validates if data pointed by data is a valid USB descriptor as
|
2010-05-05 18:53:14 +08:00
|
|
|
* well as record how many interfaces, endpoints and strings are
|
2010-11-12 21:29:28 +08:00
|
|
|
* required by given configuration. Returns address after the
|
|
|
|
* descriptor or NULL if data is invalid.
|
|
|
|
*/
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
enum ffs_entity_type {
|
|
|
|
FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
|
|
|
|
};
|
|
|
|
|
2014-07-09 18:20:08 +08:00
|
|
|
enum ffs_os_desc_type {
|
|
|
|
FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
|
|
|
|
};
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
|
|
|
|
u8 *valuep,
|
|
|
|
struct usb_descriptor_header *desc,
|
|
|
|
void *priv);
|
|
|
|
|
2014-07-09 18:20:08 +08:00
|
|
|
typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
|
|
|
|
struct usb_os_desc_header *h, void *data,
|
|
|
|
unsigned len, void *priv);
|
|
|
|
|
2014-07-09 18:20:06 +08:00
|
|
|
/*
 * Validate a single USB descriptor at @data (at most @len bytes) and
 * report the interface/string/endpoint entities it references through
 * @entity.  Returns the descriptor's length on success or -EINVAL for
 * short, oversized, reserved or unknown descriptors.
 */
static int __must_check ffs_do_single_desc(char *data, unsigned len,
					   ffs_entity_callback entity,
					   void *priv)
{
	struct usb_descriptor_header *_ds = (void *)data;
	u8 length;
	int ret;

	ENTER();

	/* At least two bytes are required: length and type */
	if (len < 2) {
		pr_vdebug("descriptor too short\n");
		return -EINVAL;
	}

	/* If we have at least as many bytes as the descriptor takes? */
	length = _ds->bLength;
	if (len < length) {
		pr_vdebug("descriptor longer then available data\n");
		return -EINVAL;
	}

#define __entity_check_INTERFACE(val)  1
#define __entity_check_STRING(val)     (val)
#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
#define __entity(type, val) do {					\
		pr_vdebug("entity " #type "(%02x)\n", (val));		\
		if (unlikely(!__entity_check_ ##type(val))) {		\
			pr_vdebug("invalid entity's value\n");		\
			return -EINVAL;					\
		}							\
		ret = entity(FFS_ ##type, &val, _ds, priv);		\
		if (unlikely(ret < 0)) {				\
			pr_debug("entity " #type "(%02x); ret = %d\n",	\
				 (val), ret);				\
			return ret;					\
		}							\
	} while (0)

	/* Parse descriptor depending on type. */
	switch (_ds->bDescriptorType) {
	case USB_DT_DEVICE:
	case USB_DT_CONFIG:
	case USB_DT_STRING:
	case USB_DT_DEVICE_QUALIFIER:
		/* function can't have any of those */
		pr_vdebug("descriptor reserved for gadget: %d\n",
		      _ds->bDescriptorType);
		return -EINVAL;

	case USB_DT_INTERFACE: {
		struct usb_interface_descriptor *ds = (void *)_ds;
		pr_vdebug("interface descriptor\n");
		if (length != sizeof *ds)
			goto inv_length;

		__entity(INTERFACE, ds->bInterfaceNumber);
		if (ds->iInterface)
			__entity(STRING, ds->iInterface);
		}
		break;

	case USB_DT_ENDPOINT: {
		struct usb_endpoint_descriptor *ds = (void *)_ds;
		pr_vdebug("endpoint descriptor\n");
		if (length != USB_DT_ENDPOINT_SIZE &&
		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
			goto inv_length;
		__entity(ENDPOINT, ds->bEndpointAddress);
		}
		break;

	case HID_DT_HID:
		pr_vdebug("hid descriptor\n");
		if (length != sizeof(struct hid_descriptor))
			goto inv_length;
		break;

	case USB_DT_OTG:
		if (length != sizeof(struct usb_otg_descriptor))
			goto inv_length;
		break;

	case USB_DT_INTERFACE_ASSOCIATION: {
		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
		pr_vdebug("interface association descriptor\n");
		if (length != sizeof *ds)
			goto inv_length;
		if (ds->iFunction)
			__entity(STRING, ds->iFunction);
		}
		break;

	case USB_DT_SS_ENDPOINT_COMP:
		pr_vdebug("EP SS companion descriptor\n");
		if (length != sizeof(struct usb_ss_ep_comp_descriptor))
			goto inv_length;
		break;

	case USB_DT_OTHER_SPEED_CONFIG:
	case USB_DT_INTERFACE_POWER:
	case USB_DT_DEBUG:
	case USB_DT_SECURITY:
	case USB_DT_CS_RADIO_CONTROL:
		/* TODO */
		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
		return -EINVAL;

	default:
		/* We should never be here */
		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
		return -EINVAL;

inv_length:
		pr_vdebug("invalid length: %d (descriptor %d)\n",
			  _ds->bLength, _ds->bDescriptorType);
		return -EINVAL;
	}

#undef __entity
#undef __entity_check_DESCRIPTOR
#undef __entity_check_INTERFACE
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT

	return length;
}
|
|
|
|
|
|
|
|
/*
 * Walk @count descriptors stored back-to-back in @data (at most @len bytes)
 * and invoke @entity for each entity found via ffs_do_single_desc().
 *
 * Protocol with the callback: before each descriptor the callback is also
 * invoked with FFS_DESCRIPTOR, the running descriptor index smuggled through
 * the valuep argument ((u8 *)num), and the current data pointer; after the
 * last descriptor it is invoked once more with data == NULL to signal the
 * end of the walk.
 *
 * Returns the number of bytes consumed on success or a negative errno.
 */
static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
				     ffs_entity_callback entity, void *priv)
{
	const unsigned _len = len;	/* original length, to compute consumed bytes */
	unsigned long num = 0;		/* index of the descriptor being processed */

	ENTER();

	for (;;) {
		int ret;

		/* All descriptors processed: NULL data marks the final callback. */
		if (num == count)
			data = NULL;

		/* Record "descriptor" entity */
		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
		if (unlikely(ret < 0)) {
			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
				 num, ret);
			return ret;
		}

		/* data == NULL means the terminating callback just ran. */
		if (!data)
			return _len - len;

		/* Parse one descriptor; ret is the number of bytes it occupied. */
		ret = ffs_do_single_desc(data, len, entity, priv);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
			return ret;
		}

		len -= ret;
		data += ret;
		++num;
	}
}
|
|
|
|
|
|
|
|
/*
 * ffs_do_descs() callback used while validating a descriptors blob written
 * by userspace: accumulates interface/string/endpoint counts into the
 * ffs_desc_helper and cross-checks endpoint addresses between speeds.
 *
 * @valuep points at the entity's value inside the raw descriptor (interface
 * number, string index or endpoint address).  Returns 0 or a negative errno.
 */
static int __ffs_data_do_entity(enum ffs_entity_type type,
				u8 *valuep, struct usb_descriptor_header *desc,
				void *priv)
{
	struct ffs_desc_helper *helper = priv;
	struct usb_endpoint_descriptor *d;

	ENTER();

	switch (type) {
	case FFS_DESCRIPTOR:
		/* Nothing to record for the per-descriptor bookkeeping call. */
		break;

	case FFS_INTERFACE:
		/*
		 * Interfaces are indexed from zero so if we
		 * encountered interface "n" then there are at least
		 * "n+1" interfaces.
		 */
		if (*valuep >= helper->interfaces_count)
			helper->interfaces_count = *valuep + 1;
		break;

	case FFS_STRING:
		/*
		 * Strings are indexed from 1 (0 is magic ;) reserved
		 * for languages list or some such)
		 */
		if (*valuep > helper->ffs->strings_count)
			helper->ffs->strings_count = *valuep;
		break;

	case FFS_ENDPOINT:
		d = (void *)desc;
		helper->eps_count++;
		/* eps_addrmap has room for 15 entries (index 0 unused). */
		if (helper->eps_count >= 15)
			return -EINVAL;
		/* Check if descriptors for any speed were already parsed */
		if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
			/* First speed parsed: record the endpoint address. */
			helper->ffs->eps_addrmap[helper->eps_count] =
				d->bEndpointAddress;
		else if (helper->ffs->eps_addrmap[helper->eps_count] !=
				d->bEndpointAddress)
			/* Subsequent speeds must use identical addresses. */
			return -EINVAL;
		break;
	}

	return 0;
}
|
|
|
|
|
2014-07-09 18:20:08 +08:00
|
|
|
static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
|
|
|
|
struct usb_os_desc_header *desc)
|
|
|
|
{
|
|
|
|
u16 bcd_version = le16_to_cpu(desc->bcdVersion);
|
|
|
|
u16 w_index = le16_to_cpu(desc->wIndex);
|
|
|
|
|
|
|
|
if (bcd_version != 1) {
|
|
|
|
pr_vdebug("unsupported os descriptors version: %d",
|
|
|
|
bcd_version);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
switch (w_index) {
|
|
|
|
case 0x4:
|
|
|
|
*next_type = FFS_OS_DESC_EXT_COMPAT;
|
|
|
|
break;
|
|
|
|
case 0x5:
|
|
|
|
*next_type = FFS_OS_DESC_EXT_PROP;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pr_vdebug("unsupported os descriptor type: %d", w_index);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return sizeof(*desc);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process all extended compatibility/extended property descriptors
|
|
|
|
* of a feature descriptor
|
|
|
|
*/
|
|
|
|
static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
|
|
|
|
enum ffs_os_desc_type type,
|
|
|
|
u16 feature_count,
|
|
|
|
ffs_os_desc_callback entity,
|
|
|
|
void *priv,
|
|
|
|
struct usb_os_desc_header *h)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const unsigned _len = len;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
/* loop over all ext compat/ext prop descriptors */
|
|
|
|
while (feature_count--) {
|
|
|
|
ret = entity(type, h, data, len, priv);
|
|
|
|
if (unlikely(ret < 0)) {
|
|
|
|
pr_debug("bad OS descriptor, type: %d\n", type);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
data += ret;
|
|
|
|
len -= ret;
|
|
|
|
}
|
|
|
|
return _len - len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
/*
 * Walk @count MS OS feature descriptors stored back-to-back in @data
 * (at most @len bytes).  Each feature descriptor consists of a
 * usb_os_desc_header followed by wCount sub-descriptors which are handed to
 * @entity via ffs_do_single_os_desc().  Returns total bytes consumed on
 * success or a negative errno.
 */
static int __must_check ffs_do_os_descs(unsigned count,
					char *data, unsigned len,
					ffs_os_desc_callback entity, void *priv)
{
	const unsigned _len = len;	/* original length, to compute consumed bytes */
	unsigned long num = 0;

	ENTER();

	for (num = 0; num < count; ++num) {
		int ret;
		enum ffs_os_desc_type type;
		u16 feature_count;
		struct usb_os_desc_header *desc = (void *)data;

		/* Must have at least a full header left in the buffer. */
		if (len < sizeof(*desc))
			return -EINVAL;

		/*
		 * Record "descriptor" entity.
		 * Process dwLength, bcdVersion, wIndex, get b/wCount.
		 * Move the data pointer to the beginning of extended
		 * compatibilities proper or extended properties proper
		 * portions of the data
		 */
		if (le32_to_cpu(desc->dwLength) > len)
			return -EINVAL;

		ret = __ffs_do_os_desc_header(&type, desc);
		if (unlikely(ret < 0)) {
			pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
				 num, ret);
			return ret;
		}
		/*
		 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
		 */
		feature_count = le16_to_cpu(desc->wCount);
		/* Ext Compat uses an 8-bit bCount; Reserved must stay zero. */
		if (type == FFS_OS_DESC_EXT_COMPAT &&
		    (feature_count > 255 || desc->Reserved))
			return -EINVAL;
		len -= ret;
		data += ret;

		/*
		 * Process all function/property descriptors
		 * of this Feature Descriptor
		 */
		ret = ffs_do_single_os_desc(data, len, type,
					    feature_count, entity, priv, desc);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
			return ret;
		}

		len -= ret;
		data += ret;
	}
	return _len - len;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Validate contents of the buffer from userspace related to OS descriptors.
|
|
|
|
*/
|
|
|
|
static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
|
|
|
|
struct usb_os_desc_header *h, void *data,
|
|
|
|
unsigned len, void *priv)
|
|
|
|
{
|
|
|
|
struct ffs_data *ffs = priv;
|
|
|
|
u8 length;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case FFS_OS_DESC_EXT_COMPAT: {
|
|
|
|
struct usb_ext_compat_desc *d = data;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (len < sizeof(*d) ||
|
|
|
|
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
|
2016-05-20 18:13:19 +08:00
|
|
|
!d->Reserved1)
|
2014-07-09 18:20:08 +08:00
|
|
|
return -EINVAL;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
|
|
|
|
if (d->Reserved2[i])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
length = sizeof(struct usb_ext_compat_desc);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case FFS_OS_DESC_EXT_PROP: {
|
|
|
|
struct usb_ext_prop_desc *d = data;
|
|
|
|
u32 type, pdl;
|
|
|
|
u16 pnl;
|
|
|
|
|
|
|
|
if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
|
|
|
|
return -EINVAL;
|
|
|
|
length = le32_to_cpu(d->dwSize);
|
|
|
|
type = le32_to_cpu(d->dwPropertyDataType);
|
|
|
|
if (type < USB_EXT_PROP_UNICODE ||
|
|
|
|
type > USB_EXT_PROP_UNICODE_MULTI) {
|
|
|
|
pr_vdebug("unsupported os descriptor property type: %d",
|
|
|
|
type);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
pnl = le16_to_cpu(d->wPropertyNameLength);
|
|
|
|
pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
|
|
|
|
if (length != 14 + pnl + pdl) {
|
|
|
|
pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
|
|
|
|
length, pnl, pdl, type);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
++ffs->ms_os_descs_ext_prop_count;
|
|
|
|
/* property name reported to the host as "WCHAR"s */
|
|
|
|
ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
|
|
|
|
ffs->ms_os_descs_ext_prop_data_len += pdl;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pr_vdebug("unknown descriptor: %d\n", type);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
return length;
|
|
|
|
}
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
/*
 * Parse and validate the descriptors blob written by userspace to ep0.
 *
 * The blob starts with a magic and total length; the v2 format adds a flags
 * word and, depending on flags, an eventfd, per-speed descriptor counts, an
 * MS OS descriptors count and the raw descriptors themselves.  On success
 * ownership of @_data passes to @ffs (kept as raw_descs_data); on failure
 * @_data is freed here.  Returns 0 or a negative errno.
 */
static int __ffs_data_got_descs(struct ffs_data *ffs,
				char *const _data, size_t len)
{
	char *data = _data, *raw_descs;
	unsigned os_descs_count = 0, counts[3], flags;
	int ret = -EINVAL, i;
	struct ffs_desc_helper helper;

	ENTER();

	/* The second le32 of the header must match the total blob length. */
	if (get_unaligned_le32(data + 4) != len)
		goto error;

	switch (get_unaligned_le32(data)) {
	case FUNCTIONFS_DESCRIPTORS_MAGIC:
		/* Legacy (v1) format: implicit FS+HS descriptors, no flags. */
		flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
		data += 8;
		len -= 8;
		break;
	case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
		flags = get_unaligned_le32(data + 8);
		ffs->user_flags = flags;
		if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
			      FUNCTIONFS_HAS_HS_DESC |
			      FUNCTIONFS_HAS_SS_DESC |
			      FUNCTIONFS_HAS_MS_OS_DESC |
			      FUNCTIONFS_VIRTUAL_ADDR |
			      FUNCTIONFS_EVENTFD)) {
			ret = -ENOSYS;
			goto error;
		}
		data += 12;
		len -= 12;
		break;
	default:
		goto error;
	}

	if (flags & FUNCTIONFS_EVENTFD) {
		if (len < 4)
			goto error;
		ffs->ffs_eventfd =
			eventfd_ctx_fdget((int)get_unaligned_le32(data));
		if (IS_ERR(ffs->ffs_eventfd)) {
			ret = PTR_ERR(ffs->ffs_eventfd);
			ffs->ffs_eventfd = NULL;
			goto error;
		}
		data += 4;
		len -= 4;
	}

	/* Read fs_count, hs_count and ss_count (if present) */
	for (i = 0; i < 3; ++i) {
		if (!(flags & (1 << i))) {
			counts[i] = 0;
		} else if (len < 4) {
			goto error;
		} else {
			counts[i] = get_unaligned_le32(data);
			data += 4;
			len -= 4;
		}
	}
	/* Bit 3 (FUNCTIONFS_HAS_MS_OS_DESC): optional MS OS descs count. */
	if (flags & (1 << i)) {
		/*
		 * Check the remaining length before reading the count; a
		 * truncated blob must not make us read past the buffer end.
		 */
		if (len < 4)
			goto error;
		os_descs_count = get_unaligned_le32(data);
		data += 4;
		len -= 4;
	}

	/* Read descriptors */
	raw_descs = data;
	helper.ffs = ffs;
	for (i = 0; i < 3; ++i) {
		if (!counts[i])
			continue;
		helper.interfaces_count = 0;
		helper.eps_count = 0;
		ret = ffs_do_descs(counts[i], data, len,
				   __ffs_data_do_entity, &helper);
		if (ret < 0)
			goto error;
		if (!ffs->eps_count && !ffs->interfaces_count) {
			/* First speed parsed: record the topology. */
			ffs->eps_count = helper.eps_count;
			ffs->interfaces_count = helper.interfaces_count;
		} else {
			/* Every speed must describe the same topology. */
			if (ffs->eps_count != helper.eps_count) {
				ret = -EINVAL;
				goto error;
			}
			if (ffs->interfaces_count != helper.interfaces_count) {
				ret = -EINVAL;
				goto error;
			}
		}
		data += ret;
		len -= ret;
	}
	if (os_descs_count) {
		ret = ffs_do_os_descs(os_descs_count, data, len,
				      __ffs_data_do_os_desc, ffs);
		if (ret < 0)
			goto error;
		data += ret;
		len -= ret;
	}

	/* No descriptors at all, or trailing garbage, is an error. */
	if (raw_descs == data || len) {
		ret = -EINVAL;
		goto error;
	}

	ffs->raw_descs_data	= _data;
	ffs->raw_descs		= raw_descs;
	ffs->raw_descs_length	= data - raw_descs;
	ffs->fs_descs_count	= counts[0];
	ffs->hs_descs_count	= counts[1];
	ffs->ss_descs_count	= counts[2];
	ffs->ms_os_descs_count	= os_descs_count;

	return 0;

error:
	kfree(_data);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Parse and validate the strings blob written by userspace to ep0 and build
 * the usb_gadget_strings tables pointing into it.
 *
 * Blob layout: magic, total length, string count, language count, then for
 * each language a le16 language id followed by str_count NUL-terminated
 * strings.  On success ownership of @_data passes to @ffs (raw_strings);
 * on failure all memory is freed here.  Returns 0 or a negative errno.
 */
static int __ffs_data_got_strings(struct ffs_data *ffs,
				  char *const _data, size_t len)
{
	u32 str_count, needed_count, lang_count;
	struct usb_gadget_strings **stringtabs, *t;
	const char *data = _data;
	struct usb_string *s;

	ENTER();

	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
		     get_unaligned_le32(data + 4) != len))
		goto error;
	str_count  = get_unaligned_le32(data + 8);
	lang_count = get_unaligned_le32(data + 12);

	/* if one is zero the other must be zero */
	if (unlikely(!str_count != !lang_count))
		goto error;

	/* Do we have at least as many strings as descriptors need? */
	needed_count = ffs->strings_count;
	if (unlikely(str_count < needed_count))
		goto error;

	/*
	 * If we don't need any strings just return and free all
	 * memory.
	 */
	if (!needed_count) {
		kfree(_data);
		return 0;
	}

	/* Allocate everything in one chunk so there's less maintenance. */
	{
		unsigned i = 0;
		vla_group(d);
		vla_item(d, struct usb_gadget_strings *, stringtabs,
			lang_count + 1);
		vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
		vla_item(d, struct usb_string, strings,
			lang_count*(needed_count+1));

		char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);

		if (unlikely(!vlabuf)) {
			kfree(_data);
			return -ENOMEM;
		}

		/* Initialize the VLA pointers */
		stringtabs = vla_ptr(vlabuf, d, stringtabs);
		t = vla_ptr(vlabuf, d, stringtab);
		i = lang_count;
		do {
			*stringtabs++ = t++;
		} while (--i);
		*stringtabs = NULL;

		/* stringtabs = vlabuf = d_stringtabs for later kfree */
		stringtabs = vla_ptr(vlabuf, d, stringtabs);
		t = vla_ptr(vlabuf, d, stringtab);
		s = vla_ptr(vlabuf, d, strings);
	}

	/* For each language */
	data += 16;
	len -= 16;

	do { /* lang_count > 0 so we can use do-while */
		unsigned needed = needed_count;
		/*
		 * Each language carries its own run of str_count strings.
		 * Count them with a per-language copy: decrementing the
		 * shared str_count directly would leave it at zero after
		 * the first language and underflow on the next one.
		 */
		u32 str_per_lang = str_count;

		if (unlikely(len < 3))
			goto error_free;
		t->language = get_unaligned_le16(data);
		t->strings  = s;
		++t;

		data += 2;
		len -= 2;

		/* For each string */
		do { /* str_count > 0 so we can use do-while */
			size_t length = strnlen(data, len);

			/* String must be NUL-terminated within the buffer. */
			if (unlikely(length == len))
				goto error_free;

			/*
			 * User may provide more strings then we need,
			 * if that's the case we simply ignore the
			 * rest
			 */
			if (likely(needed)) {
				/*
				 * s->id will be set while adding
				 * function to configuration so for
				 * now just leave garbage here.
				 */
				s->s = data;
				--needed;
				++s;
			}

			data += length + 1;
			len -= length + 1;
		} while (--str_per_lang);

		s->id = 0;   /* terminator */
		s->s = NULL;
		++s;

	} while (--lang_count);

	/* Some garbage left? */
	if (unlikely(len))
		goto error_free;

	/* Done! */
	ffs->stringtabs = stringtabs;
	ffs->raw_strings = _data;

	return 0;

error_free:
	kfree(stringtabs);
error:
	kfree(_data);
	return -EINVAL;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Events handling and management *******************************************/
|
|
|
|
|
|
|
|
/*
 * Queue a FunctionFS event for delivery to userspace, purging redundant
 * pending events first so the four-slot ffs->ev.types queue never
 * overflows.
 *
 * NOTE(review): uses wake_up_locked() and manipulates ffs->ev directly, so
 * this appears to require ffs->ev.waitq.lock to be held by the caller (as
 * ffs_event_add() does) — confirm against other call sites.
 */
static void __ffs_event_add(struct ffs_data *ffs,
			    enum usb_functionfs_event_type type)
{
	/* Event types to purge from the queue before adding @type. */
	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
	/* When set, purge everything EXCEPT rem_type1/rem_type2. */
	int neg = 0;

	/*
	 * Abort any unhandled setup
	 *
	 * We do not need to worry about some cmpxchg() changing value
	 * of ffs->setup_state without holding the lock because when
	 * state is FFS_SETUP_PENDING cmpxchg() in several places in
	 * the source does nothing.
	 */
	if (ffs->setup_state == FFS_SETUP_PENDING)
		ffs->setup_state = FFS_SETUP_CANCELLED;

	/*
	 * Logic of this function guarantees that there are at most four pending
	 * evens on ffs->ev.types queue.  This is important because the queue
	 * has space for four elements only and __ffs_ep0_read_events function
	 * depends on that limit as well.  If more event types are added, those
	 * limits have to be revisited or guaranteed to still hold.
	 */
	switch (type) {
	case FUNCTIONFS_RESUME:
		rem_type2 = FUNCTIONFS_SUSPEND;
		/* FALL THROUGH */
	case FUNCTIONFS_SUSPEND:
	case FUNCTIONFS_SETUP:
		rem_type1 = type;
		/* Discard all similar events */
		break;

	case FUNCTIONFS_BIND:
	case FUNCTIONFS_UNBIND:
	case FUNCTIONFS_DISABLE:
	case FUNCTIONFS_ENABLE:
		/* Discard everything other then power management. */
		rem_type1 = FUNCTIONFS_SUSPEND;
		rem_type2 = FUNCTIONFS_RESUME;
		neg = 1;
		break;

	default:
		WARN(1, "%d: unknown event, this should not happen\n", type);
		return;
	}

	{
		/* Compact the queue in place, dropping purged entries. */
		u8 *ev = ffs->ev.types, *out = ev;
		unsigned n = ffs->ev.count;
		for (; n; --n, ++ev)
			if ((*ev == rem_type1 || *ev == rem_type2) == neg)
				*out++ = *ev;
			else
				pr_vdebug("purging event %d\n", *ev);
		ffs->ev.count = out - ffs->ev.types;
	}

	pr_vdebug("adding event %d\n", type);
	ffs->ev.types[ffs->ev.count++] = type;
	wake_up_locked(&ffs->ev.waitq);
	/* Optionally signal the userspace-provided eventfd as well. */
	if (ffs->ffs_eventfd)
		eventfd_signal(ffs->ffs_eventfd, 1);
}
|
|
|
|
|
|
|
|
static void ffs_event_add(struct ffs_data *ffs,
|
|
|
|
enum usb_functionfs_event_type type)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
|
|
|
|
__ffs_event_add(ffs, type);
|
|
|
|
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Bind/unbind USB function hooks *******************************************/
|
|
|
|
|
2014-08-25 17:16:27 +08:00
|
|
|
/*
 * Map a (virtual) endpoint address to its index in ffs->eps_addrmap.
 * Returns the index (>= 1) or -ENOENT when the address is unknown.
 */
static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
{
	int idx = 1;

	/* Slot 0 is never used; scan the remaining map entries. */
	while (idx < ARRAY_SIZE(ffs->eps_addrmap)) {
		if (ffs->eps_addrmap[idx] == endpoint_address)
			return idx;
		++idx;
	}

	return -ENOENT;
}
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
/*
 * First bind-time pass over the raw descriptors: record each descriptor
 * header into the per-speed descriptor array currently being filled and,
 * for endpoint descriptors, allocate the backing usb_ep and usb_request
 * via autoconfig (or reuse those picked for an earlier speed).
 *
 * Called through ffs_do_descs() with @valuep carrying the descriptor index
 * (cast from the number, not a real pointer).  Returns 0 or negative errno.
 */
static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
				    struct usb_descriptor_header *desc,
				    void *priv)
{
	struct usb_endpoint_descriptor *ds = (void *)desc;
	struct ffs_function *func = priv;
	struct ffs_ep *ffs_ep;
	unsigned ep_desc_id;	/* 0 = full, 1 = high, 2 = super speed */
	int idx;
	static const char *speed_names[] = { "full", "high", "super" };

	if (type != FFS_DESCRIPTOR)
		return 0;

	/*
	 * If ss_descriptors is not NULL, we are reading super speed
	 * descriptors; if hs_descriptors is not NULL, we are reading high
	 * speed descriptors; otherwise, we are reading full speed
	 * descriptors.
	 */
	if (func->function.ss_descriptors) {
		ep_desc_id = 2;
		func->function.ss_descriptors[(long)valuep] = desc;
	} else if (func->function.hs_descriptors) {
		ep_desc_id = 1;
		func->function.hs_descriptors[(long)valuep] = desc;
	} else {
		ep_desc_id = 0;
		func->function.fs_descriptors[(long)valuep] = desc;
	}

	/* Only endpoint descriptors need the extra handling below. */
	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return 0;

	/* Translate the endpoint address to a zero-based ffs_ep index. */
	idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
	if (idx < 0)
		return idx;

	ffs_ep = func->eps + idx;

	/* Each endpoint may appear at most once per speed. */
	if (unlikely(ffs_ep->descs[ep_desc_id])) {
		pr_err("two %sspeed descriptors for EP %d\n",
			  speed_names[ep_desc_id],
			  ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
		return -EINVAL;
	}
	ffs_ep->descs[ep_desc_id] = ds;

	ffs_dump_mem(": Original ep desc", ds, ds->bLength);
	if (ffs_ep->ep) {
		/*
		 * Endpoint already autoconfigured for an earlier speed:
		 * reuse its address and, if unset, its max packet size.
		 */
		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
		if (!ds->wMaxPacketSize)
			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
	} else {
		struct usb_request *req;
		struct usb_ep *ep;
		u8 bEndpointAddress;

		/*
		 * We back up bEndpointAddress because autoconfig overwrites
		 * it with physical endpoint address.
		 */
		bEndpointAddress = ds->bEndpointAddress;
		pr_vdebug("autoconfig\n");
		ep = usb_ep_autoconfig(func->gadget, ds);
		if (unlikely(!ep))
			return -ENOTSUPP;
		ep->driver_data = func->eps + idx;

		req = usb_ep_alloc_request(ep, GFP_KERNEL);
		if (unlikely(!req))
			return -ENOMEM;

		ffs_ep->ep = ep;
		ffs_ep->req = req;
		/* Reverse map: endpoint number -> ffs_ep index + 1. */
		func->eps_revmap[ds->bEndpointAddress &
				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
		/*
		 * If we use virtual address mapping, we restore
		 * original bEndpointAddress value.
		 */
		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			ds->bEndpointAddress = bEndpointAddress;
	}
	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Second bind-time pass over the raw descriptors: rewrite, in place,
 * interface numbers, string ids and endpoint addresses with the values
 * actually assigned by the composite framework / autoconfig.
 *
 * Called through ffs_do_descs() with @valuep pointing at the byte to be
 * rewritten inside the descriptor.  Returns 0 or a negative errno.
 */
static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
				   struct usb_descriptor_header *desc,
				   void *priv)
{
	struct ffs_function *func = priv;
	unsigned idx;
	u8 newValue;

	switch (type) {
	default:
	case FFS_DESCRIPTOR:
		/* Handled in previous pass by __ffs_func_bind_do_descs() */
		return 0;

	case FFS_INTERFACE:
		idx = *valuep;
		/* Allocate a real interface id the first time we see idx. */
		if (func->interfaces_nums[idx] < 0) {
			int id = usb_interface_id(func->conf, &func->function);
			if (unlikely(id < 0))
				return id;
			func->interfaces_nums[idx] = id;
		}
		newValue = func->interfaces_nums[idx];
		break;

	case FFS_STRING:
		/* String' IDs are allocated when fsf_data is bound to cdev */
		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
		break;

	case FFS_ENDPOINT:
		/*
		 * USB_DT_ENDPOINT are handled in
		 * __ffs_func_bind_do_descs().
		 */
		if (desc->bDescriptorType == USB_DT_ENDPOINT)
			return 0;

		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
		if (unlikely(!func->eps[idx].ep))
			return -EINVAL;

		{
			/* Use whichever speed's descriptor is present. */
			struct usb_endpoint_descriptor **descs;
			descs = func->eps[idx].descs;
			newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
		}
		break;
	}

	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
	*valuep = newValue;
	return 0;
}
|
|
|
|
|
2014-07-09 18:20:08 +08:00
|
|
|
/*
 * Bind-time pass over the MS OS descriptors: populate the function's
 * os_desc_table entries from the (already validated) raw Ext Compat /
 * Ext Prop data.  Ext Prop bookkeeping structures and name/data copies are
 * carved out of buffers preallocated during _ffs_func_bind() by bumping
 * the ms_os_descs_ext_prop_*_avail pointers.
 *
 * Returns the number of raw bytes consumed (0 for an unknown type).
 */
static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
				      struct usb_os_desc_header *h, void *data,
				      unsigned len, void *priv)
{
	struct ffs_function *func = priv;
	u8 length = 0;

	switch (type) {
	case FFS_OS_DESC_EXT_COMPAT: {
		struct usb_ext_compat_desc *desc = data;
		struct usb_os_desc_table *t;

		t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
		t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
		/* Copy CompatibleID and SubCompatibleID in one go
		 * (they are adjacent in the descriptor). */
		memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
		       ARRAY_SIZE(desc->CompatibleID) +
		       ARRAY_SIZE(desc->SubCompatibleID));
		length = sizeof(*desc);
	}
		break;
	case FFS_OS_DESC_EXT_PROP: {
		struct usb_ext_prop_desc *desc = data;
		struct usb_os_desc_table *t;
		struct usb_os_desc_ext_prop *ext_prop;
		char *ext_prop_name;
		char *ext_prop_data;

		t = &func->function.os_desc_table[h->interface];
		t->if_id = func->interfaces_nums[h->interface];

		/* Carve an ext_prop record out of the preallocated pool. */
		ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
		func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);

		ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
		ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
		ext_prop->data_len = le32_to_cpu(*(u32 *)
			usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
		/* Raw on-the-wire size: 14 fixed bytes + name + data. */
		length = ext_prop->name_len + ext_prop->data_len + 14;

		/* Carve the name and data copies out of their pools too. */
		ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
		func->ffs->ms_os_descs_ext_prop_name_avail +=
			ext_prop->name_len;

		ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
		func->ffs->ms_os_descs_ext_prop_data_avail +=
			ext_prop->data_len;
		memcpy(ext_prop_data,
		       usb_ext_prop_data_ptr(data, ext_prop->name_len),
		       ext_prop->data_len);
		/* unicode data reported to the host as "WCHAR"s */
		switch (ext_prop->type) {
		case USB_EXT_PROP_UNICODE:
		case USB_EXT_PROP_UNICODE_ENV:
		case USB_EXT_PROP_UNICODE_LINK:
		case USB_EXT_PROP_UNICODE_MULTI:
			ext_prop->data_len *= 2;
			break;
		}
		ext_prop->data = ext_prop_data;

		memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
		       ext_prop->name_len);
		/* property name reported to the host as "WCHAR"s */
		ext_prop->name_len *= 2;
		ext_prop->name = ext_prop_name;

		t->os_desc->ext_prop_len +=
			ext_prop->name_len + ext_prop->data_len + 14;
		++t->os_desc->ext_prop_count;
		list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
	}
		break;
	default:
		pr_vdebug("unknown descriptor: %d\n", type);
	}

	return length;
}
|
|
|
|
|
2013-12-03 22:15:33 +08:00
|
|
|
/*
 * Common part of binding an ffs_function to a configuration: look up the
 * ffs_data behind the function instance, record the configuration and
 * gadget, and (for the first function instance) run functionfs_bind().
 *
 * Returns the f_fs_opts on success or an ERR_PTR on failure.
 */
static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
						       struct usb_configuration *c)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct f_fs_opts *ffs_opts =
		container_of(f->fi, struct f_fs_opts, func_inst);
	int ret;

	ENTER();

	/*
	 * Legacy gadget triggers binding in functionfs_ready_callback,
	 * which already uses locking; taking the same lock here would
	 * cause a deadlock.
	 *
	 * Configfs-enabled gadgets however do need ffs_dev_lock.
	 */
	if (!ffs_opts->no_configfs)
		ffs_dev_lock();
	/* Binding only proceeds once userspace has written descriptors. */
	ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
	func->ffs = ffs_opts->dev->ffs_data;
	if (!ffs_opts->no_configfs)
		ffs_dev_unlock();
	if (ret)
		return ERR_PTR(ret);

	func->conf = c;
	func->gadget = c->cdev->gadget;

	/*
	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
	 * configurations are bound in sequence with list_for_each_entry,
	 * in each configuration its functions are bound in sequence
	 * with list_for_each_entry, so we assume no race condition
	 * with regard to ffs_opts->bound access
	 */
	if (!ffs_opts->refcnt) {
		ret = functionfs_bind(func->ffs, c->cdev);
		if (ret)
			return ERR_PTR(ret);
	}
	ffs_opts->refcnt++;
	func->function.strings = func->ffs->stringtabs;

	return ffs_opts;
}
|
|
|
|
|
|
|
|
/*
 * Do the real work of binding the function: allocate one chunk that holds
 * the ffs_ep array, the per-speed descriptor pointer tables, the interface
 * number map, OS descriptor tables and a private copy of the raw
 * descriptors, then walk the descriptors to allocate endpoints
 * (__ffs_func_bind_do_descs) and to rewrite interface/endpoint numbers
 * (__ffs_func_bind_do_nums).  Returns 0 on success or a negative errno.
 */
static int _ffs_func_bind(struct usb_configuration *c,
			  struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;

	/* Which speeds did user space supply descriptors for? */
	const int full = !!func->ffs->fs_descs_count;
	const int high = gadget_is_dualspeed(func->gadget) &&
		func->ffs->hs_descs_count;
	const int super = gadget_is_superspeed(func->gadget) &&
		func->ffs->ss_descs_count;

	/* fs_len/hs_len/ss_len: bytes consumed from raw_descs per speed */
	int fs_len, hs_len, ss_len, ret, i;
	struct ffs_ep *eps_ptr;

	/* Make it a single chunk, less management later on */
	vla_group(d);
	vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
	/* the "+ 1" leaves room for the NULL terminating entry */
	vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
		full ? ffs->fs_descs_count + 1 : 0);
	vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
		high ? ffs->hs_descs_count + 1 : 0);
	vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
		super ? ffs->ss_descs_count + 1 : 0);
	vla_item_with_sz(d, short, inums, ffs->interfaces_count);
	/* OS descriptor tables are only needed when the host may ask for
	 * the MS OS string (c->cdev->use_os_string) */
	vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
	vla_item_with_sz(d, char[16], ext_compat,
			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
	vla_item_with_sz(d, struct usb_os_desc, os_desc,
			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
	vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
			 ffs->ms_os_descs_ext_prop_count);
	vla_item_with_sz(d, char, ext_prop_name,
			 ffs->ms_os_descs_ext_prop_name_len);
	vla_item_with_sz(d, char, ext_prop_data,
			 ffs->ms_os_descs_ext_prop_data_len);
	vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
	char *vlabuf;

	ENTER();

	/* Has descriptors only for speeds gadget does not support */
	if (unlikely(!(full | high | super)))
		return -ENOTSUPP;

	/* Allocate a single chunk, less management later on */
	vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
	if (unlikely(!vlabuf))
		return -ENOMEM;

	/* Hand out the ext_prop scratch areas to the ffs instance; they are
	 * consumed while parsing the OS descriptors below. */
	ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
	ffs->ms_os_descs_ext_prop_name_avail =
		vla_ptr(vlabuf, d, ext_prop_name);
	ffs->ms_os_descs_ext_prop_data_avail =
		vla_ptr(vlabuf, d, ext_prop_data);

	/* Copy descriptors */
	memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
	       ffs->raw_descs_length);

	/* 0xff == (short)-1: "interface number not assigned yet" */
	memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
	eps_ptr = vla_ptr(vlabuf, d, eps);
	for (i = 0; i < ffs->eps_count; i++)
		eps_ptr[i].num = -1;

	/* Save pointers
	 * d_eps == vlabuf, func->eps used to kfree vlabuf later
	 */
	func->eps = vla_ptr(vlabuf, d, eps);
	func->interfaces_nums = vla_ptr(vlabuf, d, inums);

	/*
	 * Go through all the endpoint descriptors and allocate
	 * endpoints first, so that later we can rewrite the endpoint
	 * numbers without worrying that it may be described later on.
	 */
	if (likely(full)) {
		func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
		fs_len = ffs_do_descs(ffs->fs_descs_count,
				      vla_ptr(vlabuf, d, raw_descs),
				      d_raw_descs__sz,
				      __ffs_func_bind_do_descs, func);
		if (unlikely(fs_len < 0)) {
			ret = fs_len;
			goto error;
		}
	} else {
		fs_len = 0;
	}

	if (likely(high)) {
		func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
		/* hs descriptors follow the fs ones in raw_descs */
		hs_len = ffs_do_descs(ffs->hs_descs_count,
				      vla_ptr(vlabuf, d, raw_descs) + fs_len,
				      d_raw_descs__sz - fs_len,
				      __ffs_func_bind_do_descs, func);
		if (unlikely(hs_len < 0)) {
			ret = hs_len;
			goto error;
		}
	} else {
		hs_len = 0;
	}

	if (likely(super)) {
		func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
		/* ss descriptors follow the fs and hs ones */
		ss_len = ffs_do_descs(ffs->ss_descs_count,
				vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
				d_raw_descs__sz - fs_len - hs_len,
				__ffs_func_bind_do_descs, func);
		if (unlikely(ss_len < 0)) {
			ret = ss_len;
			goto error;
		}
	} else {
		ss_len = 0;
	}

	/*
	 * Now handle interface numbers allocation and interface and
	 * endpoint numbers rewriting.  We can do that in one go
	 * now.
	 */
	ret = ffs_do_descs(ffs->fs_descs_count +
			   (high ? ffs->hs_descs_count : 0) +
			   (super ? ffs->ss_descs_count : 0),
			   vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
			   __ffs_func_bind_do_nums, func);
	if (unlikely(ret < 0))
		goto error;

	/* Wire up per-interface OS (Microsoft) descriptors, if requested. */
	func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
	if (c->cdev->use_os_string) {
		for (i = 0; i < ffs->interfaces_count; ++i) {
			struct usb_os_desc *desc;

			desc = func->function.os_desc_table[i].os_desc =
				vla_ptr(vlabuf, d, os_desc) +
				i * sizeof(struct usb_os_desc);
			desc->ext_compat_id =
				vla_ptr(vlabuf, d, ext_compat) + i * 16;
			INIT_LIST_HEAD(&desc->ext_prop);
		}
		/* OS descriptors live after the fs/hs/ss blocks in raw_descs */
		ret = ffs_do_os_descs(ffs->ms_os_descs_count,
				      vla_ptr(vlabuf, d, raw_descs) +
				      fs_len + hs_len + ss_len,
				      d_raw_descs__sz - fs_len - hs_len -
				      ss_len,
				      __ffs_func_bind_do_os_desc, func);
		if (unlikely(ret < 0))
			goto error;
	}
	func->function.os_desc_n =
		c->cdev->use_os_string ? ffs->interfaces_count : 0;

	/* And we're done */
	ffs_event_add(ffs, FUNCTIONFS_BIND);
	return 0;

error:
	/* XXX Do we need to release all claimed endpoints here? */
	return ret;
}
|
|
|
|
|
2013-12-03 22:15:33 +08:00
|
|
|
static int ffs_func_bind(struct usb_configuration *c,
|
|
|
|
struct usb_function *f)
|
|
|
|
{
|
|
|
|
struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
|
2015-07-13 17:03:50 +08:00
|
|
|
struct ffs_function *func = ffs_func_from_usb(f);
|
|
|
|
int ret;
|
2013-12-03 22:15:33 +08:00
|
|
|
|
|
|
|
if (IS_ERR(ffs_opts))
|
|
|
|
return PTR_ERR(ffs_opts);
|
|
|
|
|
2015-07-13 17:03:50 +08:00
|
|
|
ret = _ffs_func_bind(c, f);
|
|
|
|
if (ret && !--ffs_opts->refcnt)
|
|
|
|
functionfs_unbind(func->ffs);
|
|
|
|
|
|
|
|
return ret;
|
2013-12-03 22:15:33 +08:00
|
|
|
}
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
/* Other USB function hooks *************************************************/
|
|
|
|
|
usb: gadget: f_fs: add "no_disconnect" mode
Since we can compose gadgets from many functions, there is the problem
related to gadget breakage while FunctionFS daemon being closed. FFS
function is userspace code so there is no way to know when it will close
files (it doesn't matter what is the reason of this situation, it can
be daemon logic, program breakage, process kill or any other). So when
we have another function in gadget which, for example, sends some amount
of data, does some software update or implements some real-time functionality,
we may want to keep the gadget connected despite FFS function is no longer
functional.
We can't just remove one of functions from gadget since it has been
enumerated, so the only way to keep entire gadget working is to make
broken FFS function deactivated but still visible to host. For this
purpose this patch introduces "no_disconnect" mode. It can be enabled
by setting mount option "no_disconnect=1", and results with defering
function disconnect to the moment of reopen ep0 file or filesystem
unmount. After closing all endpoint files, FunctionFS is set to state
FFS_DEACTIVATED.
When ffs->state == FFS_DEACTIVATED:
- function is still bound and visible to host,
- setup requests are automatically stalled,
- transfers on other endpoints are refused,
- epfiles, except ep0, are deleted from the filesystem,
- opening ep0 causes the function to be closed, and then FunctionFS
is ready for descriptors and string write,
- altsetting change causes the function to be closed - we want to keep
function alive until another functions are potentialy used, altsetting
change means that another configuration is being selected or USB cable
was unplugged, which indicates that we don't need to stay longer in
FFS_DEACTIVATED state
- unmounting of the FunctionFS instance causes the function to be closed.
Tested-by: David Cohen <david.a.cohen@linux.intel.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
2014-12-18 16:55:10 +08:00
|
|
|
static void ffs_reset_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct ffs_data *ffs = container_of(work,
|
|
|
|
struct ffs_data, reset_work);
|
|
|
|
ffs_data_reset(ffs);
|
|
|
|
}
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
/*
 * set_alt() hook.  Called by the composite framework on alt-setting
 * changes; also invoked with alt == (unsigned)-1 from ffs_func_disable()
 * to deselect the function entirely.
 */
static int ffs_func_set_alt(struct usb_function *f,
			    unsigned interface, unsigned alt)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	int ret = 0, intf;

	if (alt != (unsigned)-1) {
		/* Map the host-visible interface number back to ours. */
		intf = ffs_func_revmap_intf(func, interface);
		if (unlikely(intf < 0))
			return intf;
	}

	/* Any alt change first tears down the currently enabled endpoints. */
	if (ffs->func)
		ffs_func_eps_disable(ffs->func);

	/*
	 * In "no_disconnect" mode the daemon has already gone away; an alt
	 * change means another config was selected or the cable was pulled,
	 * so schedule the deferred reset and report the function as gone.
	 */
	if (ffs->state == FFS_DEACTIVATED) {
		ffs->state = FFS_CLOSING;
		INIT_WORK(&ffs->reset_work, ffs_reset_work);
		schedule_work(&ffs->reset_work);
		return -ENODEV;
	}

	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	/* alt == -1: deselect, notify user space and bail out. */
	if (alt == (unsigned)-1) {
		ffs->func = NULL;
		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
		return 0;
	}

	ffs->func = func;
	ret = ffs_func_eps_enable(func);
	if (likely(ret >= 0))
		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
	return ret;
}
|
|
|
|
|
|
|
|
/* disable() hook: deselect the function (alt == -1 is the sentinel). */
static void ffs_func_disable(struct usb_function *f)
{
	ffs_func_set_alt(f, 0, (unsigned)-1);
}
|
|
|
|
|
|
|
|
/*
 * setup() hook: forward control requests aimed at one of our interfaces
 * or endpoints to user space as a FUNCTIONFS_SETUP event, with wIndex
 * rewritten to the user-space-visible interface/endpoint number.
 */
static int ffs_func_setup(struct usb_function *f,
			  const struct usb_ctrlrequest *creq)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	unsigned long flags;
	int ret;

	ENTER();

	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));

	/*
	 * Most requests directed to interface go through here
	 * (notable exceptions are set/get interface) so we need to
	 * handle them.  All other either handled by composite or
	 * passed to usb_configuration->setup() (if one is set).  No
	 * matter, we will handle requests directed to endpoint here
	 * as well (as it's straightforward) but what to do with any
	 * other request?
	 */
	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	switch (creq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		break;

	case USB_RECIP_ENDPOINT:
		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		/* user space may have asked to see real endpoint addresses */
		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			ret = func->ffs->eps_addrmap[ret];
		break;

	default:
		return -EOPNOTSUPP;
	}

	/* Queue the (rewritten) setup packet as an event for user space. */
	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
	ffs->ev.setup = *creq;
	ffs->ev.setup.wIndex = cpu_to_le16(ret);
	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);

	return 0;
}
|
|
|
|
|
|
|
|
static void ffs_func_suspend(struct usb_function *f)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ffs_func_resume(struct usb_function *f)
|
|
|
|
{
|
|
|
|
ENTER();
|
|
|
|
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-12 21:29:28 +08:00
|
|
|
/* Endpoint and interface numbers reverse mapping ***************************/
|
2010-05-05 18:53:14 +08:00
|
|
|
|
|
|
|
/*
 * Map a host-visible endpoint number to the user-space endpoint number.
 * Returns -EDOM when the endpoint is not part of this function.
 */
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
{
	u8 mapped = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];

	if (!mapped)
		return -EDOM;
	return mapped;
}
|
|
|
|
|
|
|
|
/*
 * Map a host-visible interface number back to the function's own index.
 * Returns -EDOM when the interface does not belong to this function.
 */
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
{
	short *nums = func->interfaces_nums;
	unsigned count = func->ffs->interfaces_count;
	unsigned idx;

	for (idx = 0; idx < count; ++idx) {
		/* negative entries mean "not assigned" */
		if (nums[idx] >= 0 && nums[idx] == intf)
			return idx;
	}

	return -EDOM;
}
|
|
|
|
|
|
|
|
|
2013-12-03 22:15:32 +08:00
|
|
|
/* Devices management *******************************************************/
|
|
|
|
|
|
|
|
static LIST_HEAD(ffs_devices);
|
|
|
|
|
2014-01-13 23:49:38 +08:00
|
|
|
static struct ffs_dev *_ffs_do_find_dev(const char *name)
|
2013-12-03 22:15:32 +08:00
|
|
|
{
|
|
|
|
struct ffs_dev *dev;
|
|
|
|
|
|
|
|
list_for_each_entry(dev, &ffs_devices, entry) {
|
|
|
|
if (!dev->name || !name)
|
|
|
|
continue;
|
|
|
|
if (strcmp(dev->name, name) == 0)
|
|
|
|
return dev;
|
|
|
|
}
|
2013-12-03 22:15:36 +08:00
|
|
|
|
2013-12-03 22:15:32 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ffs_lock must be taken by the caller of this function
|
|
|
|
*/
|
2014-01-13 23:49:38 +08:00
|
|
|
static struct ffs_dev *_ffs_get_single_dev(void)
|
2013-12-03 22:15:32 +08:00
|
|
|
{
|
|
|
|
struct ffs_dev *dev;
|
|
|
|
|
|
|
|
if (list_is_singular(&ffs_devices)) {
|
|
|
|
dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
|
|
|
|
if (dev->single)
|
|
|
|
return dev;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ffs_lock must be taken by the caller of this function
|
|
|
|
*/
|
2014-01-13 23:49:38 +08:00
|
|
|
/*
 * Find a device: the lone "single" device wins, otherwise match by name.
 * ffs_lock must be taken by the caller of this function.
 */
static struct ffs_dev *_ffs_find_dev(const char *name)
{
	struct ffs_dev *dev = _ffs_get_single_dev();

	return dev ? dev : _ffs_do_find_dev(name);
}
|
|
|
|
|
2013-12-03 22:15:36 +08:00
|
|
|
/* Configfs support *********************************************************/
|
|
|
|
|
|
|
|
/* Translate a configfs item back to its enclosing f_fs_opts. */
static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_fs_opts,
			    func_inst.group);
}
|
|
|
|
|
|
|
|
static void ffs_attr_release(struct config_item *item)
|
|
|
|
{
|
|
|
|
struct f_fs_opts *opts = to_ffs_opts(item);
|
|
|
|
|
|
|
|
usb_put_function_instance(&opts->func_inst);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Item operations for the configfs directory of an f_fs instance. */
static struct configfs_item_operations ffs_item_ops = {
	.release	= ffs_attr_release,
};

/* configfs item type used by ffs_alloc_inst() for the instance group. */
static struct config_item_type ffs_func_type = {
	.ct_item_ops	= &ffs_item_ops,
	.ct_owner	= THIS_MODULE,
};
|
|
|
|
|
|
|
|
|
2013-12-03 22:15:33 +08:00
|
|
|
/* Function registration interface ******************************************/
|
|
|
|
|
|
|
|
static void ffs_free_inst(struct usb_function_instance *f)
|
|
|
|
{
|
|
|
|
struct f_fs_opts *opts;
|
|
|
|
|
|
|
|
opts = to_f_fs_opts(f);
|
|
|
|
ffs_dev_lock();
|
2014-01-13 23:49:38 +08:00
|
|
|
_ffs_free_dev(opts->dev);
|
2013-12-03 22:15:33 +08:00
|
|
|
ffs_dev_unlock();
|
|
|
|
kfree(opts);
|
|
|
|
}
|
|
|
|
|
2013-12-03 22:15:36 +08:00
|
|
|
#define MAX_INST_NAME_LEN 40
|
|
|
|
|
|
|
|
/*
 * configfs set_inst_name hook: rename the instance's ffs_dev.  The new
 * name is duplicated, the old name (if it was allocated by us) is freed
 * after the rename succeeds.  Returns 0 or a negative errno.
 */
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
	struct f_fs_opts *opts;
	char *ptr;
	const char *tmp;
	int name_len, ret;

	name_len = strlen(name) + 1;
	if (name_len > MAX_INST_NAME_LEN)
		return -ENAMETOOLONG;

	ptr = kstrndup(name, name_len, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	opts = to_f_fs_opts(fi);
	tmp = NULL;

	ffs_dev_lock();

	/* remember the old name so it can be freed after the swap */
	tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
	ret = _ffs_name_dev(opts->dev, ptr);
	if (ret) {
		/* rename failed (name in use): drop our copy, keep the old */
		kfree(ptr);
		ffs_dev_unlock();
		return ret;
	}
	/* the dev now owns ptr and must kfree it on teardown */
	opts->dev->name_allocated = true;

	ffs_dev_unlock();

	/* free the previous allocated name outside the lock */
	kfree(tmp);

	return 0;
}
|
|
|
|
|
2013-12-03 22:15:33 +08:00
|
|
|
static struct usb_function_instance *ffs_alloc_inst(void)
|
|
|
|
{
|
|
|
|
struct f_fs_opts *opts;
|
|
|
|
struct ffs_dev *dev;
|
|
|
|
|
|
|
|
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
|
|
|
|
if (!opts)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2013-12-03 22:15:36 +08:00
|
|
|
opts->func_inst.set_inst_name = ffs_set_inst_name;
|
2013-12-03 22:15:33 +08:00
|
|
|
opts->func_inst.free_func_inst = ffs_free_inst;
|
|
|
|
ffs_dev_lock();
|
2014-01-13 23:49:38 +08:00
|
|
|
dev = _ffs_alloc_dev();
|
2013-12-03 22:15:33 +08:00
|
|
|
ffs_dev_unlock();
|
|
|
|
if (IS_ERR(dev)) {
|
|
|
|
kfree(opts);
|
|
|
|
return ERR_CAST(dev);
|
|
|
|
}
|
|
|
|
opts->dev = dev;
|
2013-12-03 22:15:36 +08:00
|
|
|
dev->opts = opts;
|
2013-12-03 22:15:33 +08:00
|
|
|
|
2013-12-03 22:15:36 +08:00
|
|
|
config_group_init_type_name(&opts->func_inst.group, "",
|
|
|
|
&ffs_func_type);
|
2013-12-03 22:15:33 +08:00
|
|
|
return &opts->func_inst;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* free_func hook: release the ffs_function wrapper. */
static void ffs_free(struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);

	kfree(func);
}
|
|
|
|
|
|
|
|
static void ffs_func_unbind(struct usb_configuration *c,
|
|
|
|
struct usb_function *f)
|
|
|
|
{
|
|
|
|
struct ffs_function *func = ffs_func_from_usb(f);
|
|
|
|
struct ffs_data *ffs = func->ffs;
|
|
|
|
struct f_fs_opts *opts =
|
|
|
|
container_of(f->fi, struct f_fs_opts, func_inst);
|
|
|
|
struct ffs_ep *ep = func->eps;
|
|
|
|
unsigned count = ffs->eps_count;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
if (ffs->func == func) {
|
|
|
|
ffs_func_eps_disable(func);
|
|
|
|
ffs->func = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!--opts->refcnt)
|
|
|
|
functionfs_unbind(ffs);
|
|
|
|
|
|
|
|
/* cleanup after autoconfig */
|
|
|
|
spin_lock_irqsave(&func->ffs->eps_lock, flags);
|
|
|
|
do {
|
|
|
|
if (ep->ep && ep->req)
|
|
|
|
usb_ep_free_request(ep->ep, ep->req);
|
|
|
|
ep->req = NULL;
|
|
|
|
++ep;
|
|
|
|
} while (--count);
|
|
|
|
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
|
|
|
|
kfree(func->eps);
|
|
|
|
func->eps = NULL;
|
|
|
|
/*
|
|
|
|
* eps, descriptors and interfaces_nums are allocated in the
|
|
|
|
* same chunk so only one free is required.
|
|
|
|
*/
|
|
|
|
func->function.fs_descriptors = NULL;
|
|
|
|
func->function.hs_descriptors = NULL;
|
2014-02-28 19:20:22 +08:00
|
|
|
func->function.ss_descriptors = NULL;
|
2013-12-03 22:15:33 +08:00
|
|
|
func->interfaces_nums = NULL;
|
|
|
|
|
|
|
|
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
|
|
|
|
{
|
|
|
|
struct ffs_function *func;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
|
|
|
|
func = kzalloc(sizeof(*func), GFP_KERNEL);
|
|
|
|
if (unlikely(!func))
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
func->function.name = "Function FS Gadget";
|
|
|
|
|
|
|
|
func->function.bind = ffs_func_bind;
|
|
|
|
func->function.unbind = ffs_func_unbind;
|
|
|
|
func->function.set_alt = ffs_func_set_alt;
|
|
|
|
func->function.disable = ffs_func_disable;
|
|
|
|
func->function.setup = ffs_func_setup;
|
|
|
|
func->function.suspend = ffs_func_suspend;
|
|
|
|
func->function.resume = ffs_func_resume;
|
|
|
|
func->function.free_func = ffs_free;
|
|
|
|
|
|
|
|
return &func->function;
|
|
|
|
}
|
|
|
|
|
2013-12-03 22:15:32 +08:00
|
|
|
/*
|
|
|
|
* ffs_lock must be taken by the caller of this function
|
|
|
|
*/
|
2014-01-13 23:49:38 +08:00
|
|
|
/*
 * Allocate and register a new ffs_dev; initializes the functionfs core
 * (filesystem registration) when this is the first device.
 * ffs_lock must be taken by the caller of this function.
 */
static struct ffs_dev *_ffs_alloc_dev(void)
{
	struct ffs_dev *dev;
	int ret;

	/* an existing "single" device forbids any further registrations */
	if (_ffs_get_single_dev())
		return ERR_PTR(-EBUSY);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/* first device brings the functionfs core up */
	if (list_empty(&ffs_devices)) {
		ret = functionfs_init();
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}

	list_add(&dev->entry, &ffs_devices);

	return dev;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ffs_lock must be taken by the caller of this function
|
|
|
|
* The caller is responsible for "name" being available whenever f_fs needs it
|
|
|
|
*/
|
|
|
|
static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
|
|
|
|
{
|
|
|
|
struct ffs_dev *existing;
|
|
|
|
|
2014-01-13 23:49:38 +08:00
|
|
|
existing = _ffs_do_find_dev(name);
|
2013-12-03 22:15:32 +08:00
|
|
|
if (existing)
|
|
|
|
return -EBUSY;
|
2014-01-13 23:49:36 +08:00
|
|
|
|
2013-12-03 22:15:32 +08:00
|
|
|
dev->name = name;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The caller is responsible for "name" being available whenever f_fs needs it
|
|
|
|
*/
|
|
|
|
/*
 * Public locked wrapper around _ffs_name_dev().
 * The caller is responsible for "name" being available whenever f_fs needs it.
 */
int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
	int err;

	ffs_dev_lock();
	err = _ffs_name_dev(dev, name);
	ffs_dev_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);
|
2013-12-03 22:15:32 +08:00
|
|
|
|
|
|
|
int ffs_single_dev(struct ffs_dev *dev)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
ffs_dev_lock();
|
|
|
|
|
|
|
|
if (!list_is_singular(&ffs_devices))
|
|
|
|
ret = -EBUSY;
|
|
|
|
else
|
|
|
|
dev->single = true;
|
|
|
|
|
|
|
|
ffs_dev_unlock();
|
|
|
|
return ret;
|
|
|
|
}
|
2014-04-02 02:19:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(ffs_single_dev);
|
2013-12-03 22:15:32 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ffs_lock must be taken by the caller of this function
|
|
|
|
*/
|
2014-01-13 23:49:38 +08:00
|
|
|
/*
 * Unregister and free a device; tears down the functionfs core when the
 * last device goes away.
 * ffs_lock must be taken by the caller of this function.
 */
static void _ffs_free_dev(struct ffs_dev *dev)
{
	list_del(&dev->entry);
	/* the name is only ours to free when ffs_set_inst_name() duped it */
	if (dev->name_allocated)
		kfree(dev->name);
	kfree(dev);
	if (list_empty(&ffs_devices))
		functionfs_cleanup();
}
|
|
|
|
|
|
|
|
/*
 * Claim a device at mount time.  Returns the ffs_dev on success or an
 * ERR_PTR: -ENOENT when no matching device exists (or the acquire
 * callback refuses), -EBUSY when it is already mounted.
 */
static void *ffs_acquire_dev(const char *dev_name)
{
	struct ffs_dev *ffs_dev;

	ENTER();
	ffs_dev_lock();

	ffs_dev = _ffs_find_dev(dev_name);
	if (!ffs_dev)
		ffs_dev = ERR_PTR(-ENOENT);
	else if (ffs_dev->mounted)
		ffs_dev = ERR_PTR(-EBUSY);
	else if (ffs_dev->ffs_acquire_dev_callback &&
	    ffs_dev->ffs_acquire_dev_callback(ffs_dev))
		/* owner's callback vetoed the acquisition */
		ffs_dev = ERR_PTR(-ENOENT);
	else
		ffs_dev->mounted = true;

	ffs_dev_unlock();
	return ffs_dev;
}
|
|
|
|
|
|
|
|
static void ffs_release_dev(struct ffs_data *ffs_data)
|
|
|
|
{
|
|
|
|
struct ffs_dev *ffs_dev;
|
|
|
|
|
|
|
|
ENTER();
|
|
|
|
ffs_dev_lock();
|
|
|
|
|
|
|
|
ffs_dev = ffs_data->private_data;
|
2014-01-13 23:49:35 +08:00
|
|
|
if (ffs_dev) {
|
2013-12-03 22:15:32 +08:00
|
|
|
ffs_dev->mounted = false;
|
2014-01-13 23:49:35 +08:00
|
|
|
|
|
|
|
if (ffs_dev->ffs_release_dev_callback)
|
|
|
|
ffs_dev->ffs_release_dev_callback(ffs_dev);
|
|
|
|
}
|
2013-12-03 22:15:32 +08:00
|
|
|
|
|
|
|
ffs_dev_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Called when user space has written descriptors and strings: marks the
 * device ready and invokes the owner's ready callback.  Only when the
 * callback succeeds do we arm the closed-callback bit, so a failed ready
 * never triggers a spurious closed notification.
 */
static int ffs_ready(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	int ret = 0;

	ENTER();
	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj) {
		ret = -EINVAL;
		goto done;
	}
	/* double ready without an intervening close is a caller bug */
	if (WARN_ON(ffs_obj->desc_ready)) {
		ret = -EBUSY;
		goto done;
	}

	ffs_obj->desc_ready = true;
	ffs_obj->ffs_data = ffs;

	if (ffs_obj->ffs_ready_callback) {
		ret = ffs_obj->ffs_ready_callback(ffs);
		if (ret)
			goto done;
	}

	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
	ffs_dev_unlock();
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Called when the FunctionFS instance is torn down (ep0 closed /
 * unmounted): clears the ready state, fires the owner's closed callback
 * (at most once, guarded by FFS_FL_CALL_CLOSED_CALLBACK), and for
 * configfs-composed gadgets unregisters the enclosing gadget.
 */
static void ffs_closed(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	struct f_fs_opts *opts;

	ENTER();
	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj)
		goto done;

	ffs_obj->desc_ready = false;

	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
	    ffs_obj->ffs_closed_callback)
		ffs_obj->ffs_closed_callback(ffs);

	if (ffs_obj->opts)
		opts = ffs_obj->opts;
	else
		goto done;

	/*
	 * Nothing to unregister for legacy (no_configfs) gadgets, or when
	 * the configfs item is unlinked or already on its way out.
	 */
	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
	    || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
		goto done;

	unregister_gadget_item(ffs_obj->opts->
			       func_inst.group.cg_item.ci_parent->ci_parent);
done:
	ffs_dev_unlock();
}
|
|
|
|
|
2010-05-05 18:53:14 +08:00
|
|
|
/* Misc helper functions ****************************************************/
|
|
|
|
|
|
|
|
static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
|
|
|
|
{
|
|
|
|
return nonblock
|
|
|
|
? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
|
|
|
|
: mutex_lock_interruptible(mutex);
|
|
|
|
}
|
|
|
|
|
2012-09-27 09:43:45 +08:00
|
|
|
/*
 * Copy a user-space buffer into a freshly allocated kernel buffer.
 * Returns NULL for a zero-length request, the buffer on success, or an
 * ERR_PTR (-ENOMEM / -EFAULT).  Uses memdup_user() instead of the
 * hand-rolled kmalloc + copy_from_user pair; the error semantics are
 * identical.  The caller owns (and must kfree) the result.
 */
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
{
	char *data;

	if (unlikely(!len))
		return NULL;

	data = memdup_user(buf, len);
	if (IS_ERR(data))
		return data;

	pr_vdebug("Buffer from user space:\n");
	ffs_dump_mem("", data, len);

	return data;
}
|
2013-12-03 22:15:33 +08:00
|
|
|
|
|
|
|
DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_AUTHOR("Michal Nazarewicz");
|