Char/Misc driver fixes for 5.0-rc4

Here are some small char and misc driver fixes to resolve some reported
issues, as well as a number of binderfs fixups that were found after
auditing the filesystem code by Al Viro. As binderfs hasn't been in a
previous release yet, it's good to get these in now before the first
users show up.

All of these have been in linux-next for a bit with no reported issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

-----BEGIN PGP SIGNATURE-----

iG0EABECAC0WIQT0tgzFv3jCIUoxPcsxR9QN2y37KQUCXEr/Iw8cZ3JlZ0Brcm9h
aC5jb20ACgkQMUfUDdst+ymLqACgsYcCs0r/RMqXfvMqJ7beUGq02ioAoNPD0hQh
Z76nfI+21TiuXx24JCfZ
=Fo1U
-----END PGP SIGNATURE-----

Merge tag 'char-misc-5.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
 "Here are some small char and misc driver fixes to resolve some
  reported issues, as well as a number of binderfs fixups that were
  found after auditing the filesystem code by Al Viro. As binderfs
  hasn't been in a previous release yet, it's good to get these in now
  before the first users show up.

  All of these have been in linux-next for a bit with no reported
  issues"

* tag 'char-misc-5.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (26 commits)
  i3c: master: Fix an error checking typo in 'cdns_i3c_master_probe()'
  binderfs: switch from d_add() to d_instantiate()
  binderfs: drop lock in binderfs_binder_ctl_create
  binderfs: kill_litter_super() before cleanup
  binderfs: rework binderfs_binder_device_create()
  binderfs: rework binderfs_fill_super()
  binderfs: prevent renaming the control dentry
  binderfs: remove outdated comment
  binderfs: use __u32 for device numbers
  binderfs: use correct include guards in header
  misc: pvpanic: fix warning implicit declaration
  char/mwave: fix potential Spectre v1 vulnerability
  misc: ibmvsm: Fix potential NULL pointer dereference
  binderfs: fix error return code in binderfs_fill_super()
  mei: me: add denverton innovation engine device IDs
  mei: me: mark LBG devices as having dma support
  mei: dma: silent the reject message
  binderfs: handle !CONFIG_IPC_NS builds
  binderfs: reserve devices for initial mount
  binderfs: rename header to binderfs.h
  ...
This commit is contained in: commit d488bd21a4
@@ -11,6 +11,7 @@
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/namei.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/miscdevice.h>

@@ -20,6 +21,7 @@
#include <linux/parser.h>
#include <linux/radix-tree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include <linux/stddef.h>

@@ -30,7 +32,7 @@
#include <linux/xarray.h>
#include <uapi/asm-generic/errno-base.h>
#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binder_ctl.h>
#include <uapi/linux/android/binderfs.h>

#include "binder_internal.h"
@@ -39,13 +41,31 @@
#define INODE_OFFSET 3
#define INTSTRLEN 21
#define BINDERFS_MAX_MINOR (1U << MINORBITS)
static struct vfsmount *binderfs_mnt;
/* Ensure that the initial ipc namespace always has devices available. */
#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)

static dev_t binderfs_dev;
static DEFINE_MUTEX(binderfs_minors_mutex);
static DEFINE_IDA(binderfs_minors);

/**
* binderfs_mount_opts - mount options for binderfs
* @max: maximum number of allocatable binderfs binder devices
*/
struct binderfs_mount_opts {
int max;
};

enum {
Opt_max,
Opt_err
};

static const match_table_t tokens = {
{ Opt_max, "max=%d" },
{ Opt_err, NULL }
};

/**
* binderfs_info - information about a binderfs mount
* @ipc_ns: The ipc namespace the binderfs mount belongs to.

@@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors);
* created.
* @root_gid: gid that needs to be used when a new binder device is
* created.
* @mount_opts: The mount options in use.
* @device_count: The current number of allocated binder devices.
*/
struct binderfs_info {
struct ipc_namespace *ipc_ns;
struct dentry *control_dentry;
kuid_t root_uid;
kgid_t root_gid;
struct binderfs_mount_opts mount_opts;
int device_count;
};

static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)

@@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode)
* @userp: buffer to copy information about new device for userspace to
* @req: struct binderfs_device as copied from userspace
*
* This function allocated a new binder_device and reserves a new minor
* This function allocates a new binder_device and reserves a new minor
* number for it.
* Minor numbers are limited and tracked globally in binderfs_minors. The
* function will stash a struct binder_device for the specific binder
@@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
struct binderfs_device *req)
{
int minor, ret;
struct dentry *dentry, *dup, *root;
struct dentry *dentry, *root;
struct binder_device *device;
size_t name_len = BINDERFS_MAX_NAME + 1;
char *name = NULL;
size_t name_len;
struct inode *inode = NULL;
struct super_block *sb = ref_inode->i_sb;
struct binderfs_info *info = sb->s_fs_info;
#if defined(CONFIG_IPC_NS)
bool use_reserve = (info->ipc_ns == &init_ipc_ns);
#else
bool use_reserve = true;
#endif

/* Reserve new minor number for the new device. */
mutex_lock(&binderfs_minors_mutex);
minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
mutex_unlock(&binderfs_minors_mutex);
if (minor < 0)
if (++info->device_count <= info->mount_opts.max)
minor = ida_alloc_max(&binderfs_minors,
use_reserve ? BINDERFS_MAX_MINOR :
BINDERFS_MAX_MINOR_CAPPED,
GFP_KERNEL);
else
minor = -ENOSPC;
if (minor < 0) {
--info->device_count;
mutex_unlock(&binderfs_minors_mutex);
return minor;
}
mutex_unlock(&binderfs_minors_mutex);

ret = -ENOMEM;
device = kzalloc(sizeof(*device), GFP_KERNEL);

@@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode->i_uid = info->root_uid;
inode->i_gid = info->root_gid;

name = kmalloc(name_len, GFP_KERNEL);
req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
name_len = strlen(req->name);
/* Make sure to include terminating NUL byte */
name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
if (!name)
goto err;

strscpy(name, req->name, name_len);

device->binderfs_inode = inode;
device->context.binder_context_mgr_uid = INVALID_UID;
device->context.name = name;
@@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
root = sb->s_root;
inode_lock(d_inode(root));
dentry = d_alloc_name(root, name);
if (!dentry) {

/* look it up */
dentry = lookup_one_len(name, root, name_len);
if (IS_ERR(dentry)) {
inode_unlock(d_inode(root));
ret = -ENOMEM;
ret = PTR_ERR(dentry);
goto err;
}

/* Verify that the name userspace gave us is not already in use. */
dup = d_lookup(root, &dentry->d_name);
if (dup) {
if (d_really_is_positive(dup)) {
dput(dup);
dput(dentry);
inode_unlock(d_inode(root));
ret = -EEXIST;
goto err;
}
dput(dup);
if (d_really_is_positive(dentry)) {
/* already exists */
dput(dentry);
inode_unlock(d_inode(root));
ret = -EEXIST;
goto err;
}

inode->i_private = device;
d_add(dentry, inode);
d_instantiate(dentry, inode);
fsnotify_create(root->d_inode, dentry);
inode_unlock(d_inode(root));

@@ -187,6 +222,7 @@ err:
kfree(name);
kfree(device);
mutex_lock(&binderfs_minors_mutex);
--info->device_count;
ida_free(&binderfs_minors, minor);
mutex_unlock(&binderfs_minors_mutex);
iput(inode);

@@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
static void binderfs_evict_inode(struct inode *inode)
{
struct binder_device *device = inode->i_private;
struct binderfs_info *info = BINDERFS_I(inode);

clear_inode(inode);

@@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode)
return;

mutex_lock(&binderfs_minors_mutex);
--info->device_count;
ida_free(&binderfs_minors, device->miscdev.minor);
mutex_unlock(&binderfs_minors_mutex);
@@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode)
kfree(device);
}

/**
* binderfs_parse_mount_opts - parse binderfs mount options
* @data: options to set (can be NULL in which case defaults are used)
*/
static int binderfs_parse_mount_opts(char *data,
struct binderfs_mount_opts *opts)
{
char *p;
opts->max = BINDERFS_MAX_MINOR;

while ((p = strsep(&data, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
int max_devices;

if (!*p)
continue;

token = match_token(p, tokens, args);
switch (token) {
case Opt_max:
if (match_int(&args[0], &max_devices) ||
(max_devices < 0 ||
(max_devices > BINDERFS_MAX_MINOR)))
return -EINVAL;

opts->max = max_devices;
break;
default:
pr_err("Invalid mount options\n");
return -EINVAL;
}
}

return 0;
}

static int binderfs_remount(struct super_block *sb, int *flags, char *data)
{
struct binderfs_info *info = sb->s_fs_info;
return binderfs_parse_mount_opts(data, &info->mount_opts);
}

static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
{
struct binderfs_info *info;

info = root->d_sb->s_fs_info;
if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
seq_printf(seq, ",max=%d", info->mount_opts.max);

return 0;
}

static const struct super_operations binderfs_super_ops = {
.statfs = simple_statfs,
.evict_inode = binderfs_evict_inode,
.evict_inode = binderfs_evict_inode,
.remount_fs = binderfs_remount,
.show_options = binderfs_show_mount_opts,
.statfs = simple_statfs,
};

static inline bool is_binderfs_control_device(const struct dentry *dentry)
{
struct binderfs_info *info = dentry->d_sb->s_fs_info;
return info->control_dentry == dentry;
}

static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct inode *inode = d_inode(old_dentry);

/* binderfs doesn't support directories. */
if (d_is_dir(old_dentry))
if (is_binderfs_control_device(old_dentry) ||
is_binderfs_control_device(new_dentry))
return -EPERM;

if (flags & ~RENAME_NOREPLACE)
return -EINVAL;

if (!simple_empty(new_dentry))
return -ENOTEMPTY;

if (d_really_is_positive(new_dentry))
simple_unlink(new_dir, new_dentry);

old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
new_dir->i_mtime = inode->i_ctime = current_time(old_dir);

return 0;
return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}

static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
{
/*
* The control dentry is only ever touched during mount so checking it
* here should not require us to take lock.
*/
if (BINDERFS_I(dir)->control_dentry == dentry)
if (is_binderfs_control_device(dentry))
return -EPERM;

return simple_unlink(dir, dentry);
@@ -318,8 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
if (!device)
return -ENOMEM;

inode_lock(d_inode(root));

/* If we have already created a binder-control node, return. */
if (info->control_dentry) {
ret = 0;

@@ -358,12 +438,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
inode->i_private = device;
info->control_dentry = dentry;
d_add(dentry, inode);
inode_unlock(d_inode(root));

return 0;

out:
inode_unlock(d_inode(root));
kfree(device);
iput(inode);

@@ -378,12 +456,9 @@ static const struct inode_operations binderfs_dir_inode_operations = {
static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
{
int ret;
struct binderfs_info *info;
int ret = -ENOMEM;
struct inode *inode = NULL;
struct ipc_namespace *ipc_ns = sb->s_fs_info;

get_ipc_ns(ipc_ns);

sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -405,11 +480,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &binderfs_super_ops;
sb->s_time_gran = 1;

info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
if (!info)
goto err_without_dentry;
sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
if (!sb->s_fs_info)
return -ENOMEM;
info = sb->s_fs_info;

info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);

ret = binderfs_parse_mount_opts(data, &info->mount_opts);
if (ret)
return ret;

info->ipc_ns = ipc_ns;
info->root_gid = make_kgid(sb->s_user_ns, 0);
if (!gid_valid(info->root_gid))
info->root_gid = GLOBAL_ROOT_GID;

@@ -417,11 +498,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
if (!uid_valid(info->root_uid))
info->root_uid = GLOBAL_ROOT_UID;

sb->s_fs_info = info;

inode = new_inode(sb);
if (!inode)
goto err_without_dentry;
return -ENOMEM;

inode->i_ino = FIRST_INODE;
inode->i_fop = &simple_dir_operations;
@@ -432,79 +511,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto err_without_dentry;
return -ENOMEM;

ret = binderfs_binder_ctl_create(sb);
if (ret)
goto err_with_dentry;

return 0;

err_with_dentry:
dput(sb->s_root);
sb->s_root = NULL;

err_without_dentry:
put_ipc_ns(ipc_ns);
iput(inode);
kfree(info);

return ret;
}

static int binderfs_test_super(struct super_block *sb, void *data)
{
struct binderfs_info *info = sb->s_fs_info;

if (info)
return info->ipc_ns == data;

return 0;
}

static int binderfs_set_super(struct super_block *sb, void *data)
{
sb->s_fs_info = data;
return set_anon_super(sb, NULL);
return binderfs_binder_ctl_create(sb);
}

static struct dentry *binderfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
struct super_block *sb;
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);

sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
flags, ipc_ns->user_ns, ipc_ns);
if (IS_ERR(sb))
return ERR_CAST(sb);

if (!sb->s_root) {
int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
if (ret) {
deactivate_locked_super(sb);
return ERR_PTR(ret);
}

sb->s_flags |= SB_ACTIVE;
}

return dget(sb->s_root);
return mount_nodev(fs_type, flags, data, binderfs_fill_super);
}

static void binderfs_kill_super(struct super_block *sb)
{
struct binderfs_info *info = sb->s_fs_info;

kill_litter_super(sb);

if (info && info->ipc_ns)
put_ipc_ns(info->ipc_ns);

kfree(info);
kill_litter_super(sb);
}

static struct file_system_type binder_fs_type = {

@@ -530,14 +558,6 @@ static int __init init_binderfs(void)
return ret;
}

binderfs_mnt = kern_mount(&binder_fs_type);
if (IS_ERR(binderfs_mnt)) {
ret = PTR_ERR(binderfs_mnt);
binderfs_mnt = NULL;
unregister_filesystem(&binder_fs_type);
unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
}

return ret;
}
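For context on the reworked binderfs_binder_device_create() path above: once the max= limit is reached the allocation fails with -ENOSPC, otherwise a new binder device shows up in the mount. Below is a minimal user-space sketch, not part of this commit; the mount point /dev/binderfs, the max=4096 option, and the device name "my-binder" are assumptions made up for the example, and it assumes the renamed <linux/android/binderfs.h> header from this series is installed.

/* Hedged user-space sketch: request a new binder device through the
 * binder-control node, e.g. after
 *   mount -t binder binder /dev/binderfs -o max=4096
 * The mount point and device name below are made up for illustration. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binderfs.h>

int main(void)
{
	struct binderfs_device req;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	strncpy(req.name, "my-binder", sizeof(req.name) - 1);

	fd = open("/dev/binderfs/binder-control", O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		perror("open binder-control");
		return 1;
	}

	ret = ioctl(fd, BINDER_CTL_ADD, &req);
	if (ret < 0)
		perror("BINDER_CTL_ADD");	/* e.g. ENOSPC once max= devices exist */
	else
		printf("created %s with device number %u:%u\n",
		       req.name, req.major, req.minor);

	close(fd);
	return ret < 0 ? 1 : 0;
}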
@@ -59,6 +59,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/serial_8250.h>
#include <linux/nospec.h>
#include "smapi.h"
#include "mwavedd.h"
#include "3780i.h"

@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
PRINTK_3(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
" ipcnum %x entry usIntCount %x\n",

@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
" Invalid ipcnum %x\n", ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
PRINTK_3(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
" ipcnum %x, usIntCount %x\n",

@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
mutex_lock(&mwave_mutex);
if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
pDrvData->IPCs[ipcnum].bIsEnabled = false;
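The three mwave hunks above apply the usual Spectre v1 mitigation: the user-supplied ipcnum passes a bounds check and is then clamped with array_index_nospec() before it indexes the IPC array. A self-contained sketch of the pattern follows; the structure and helper are made up for illustration, only array_index_nospec() and ARRAY_SIZE() are the real kernel interfaces.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/nospec.h>

/* Illustrative only: a fixed-size table indexed by an untrusted value. */
struct ipc_slot {
	unsigned short usIntCount;
};

static struct ipc_slot ipc_table[16];

static int ipc_read_count(unsigned int ipcnum)
{
	if (ipcnum >= ARRAY_SIZE(ipc_table))
		return -EINVAL;

	/* Clamp the index architecturally so a mis-speculated load cannot
	 * run past the bounds check with an attacker-controlled value. */
	ipcnum = array_index_nospec(ipcnum, ARRAY_SIZE(ipc_table));

	return ipc_table[ipcnum].usIntCount;
}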
@@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
int vmbus_disconnect_ring(struct vmbus_channel *channel)
{
struct vmbus_channel *cur_channel, *tmp;
unsigned long flags;
LIST_HEAD(list);
int ret;

if (channel->primary_channel != NULL)
return -EINVAL;

/* Snapshot the list of subchannels */
spin_lock_irqsave(&channel->lock, flags);
list_splice_init(&channel->sc_list, &list);
spin_unlock_irqrestore(&channel->lock, flags);

list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
if (cur_channel->rescind)
wait_for_completion(&cur_channel->rescind_event);
@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
pfn_cnt -= pgs_ol;
/*
* Check if the corresponding memory block is already
* online by checking its last previously backed page.
* In case it is we need to bring rest (which was not
* backed previously) online too.
* online. It is possible to observe struct pages still
* being uninitialized here so check section instead.
* In case the section is online we need to bring the
* rest of pfns (which were not backed previously)
* online too.
*/
if (start_pfn > has->start_pfn &&
!PageReserved(pfn_to_page(start_pfn - 1)))
online_section_nr(pfn_to_section_nr(start_pfn)))
hv_bring_pgs_online(has, start_pfn, pgs_ol);

}
@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;

if (ring_info->ring_buffer) {
hv_get_ringbuffer_availbytes(ring_info,
&bytes_avail_toread,
&bytes_avail_towrite);
if (!ring_info->ring_buffer)
return -EINVAL;

debug_info->bytes_avail_toread = bytes_avail_toread;
debug_info->bytes_avail_towrite = bytes_avail_towrite;
debug_info->current_read_index =
ring_info->ring_buffer->read_index;
debug_info->current_write_index =
ring_info->ring_buffer->write_index;
debug_info->current_interrupt_mask =
ring_info->ring_buffer->interrupt_mask;
}
hv_get_ringbuffer_availbytes(ring_info,
&bytes_avail_toread,
&bytes_avail_towrite);
debug_info->bytes_avail_toread = bytes_avail_toread;
debug_info->bytes_avail_towrite = bytes_avail_towrite;
debug_info->current_read_index = ring_info->ring_buffer->read_index;
debug_info->current_write_index = ring_info->ring_buffer->write_index;
debug_info->current_interrupt_mask
= ring_info->ring_buffer->interrupt_mask;
return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
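Since hv_ringbuffer_get_debuginfo() can now fail when the ring buffer has not been mapped, callers must check its result before trusting the snapshot; the sysfs show hunks that follow do exactly that. A small hypothetical caller, just to show the pattern (the function name in_avail_show is made up):

#include <linux/hyperv.h>
#include <linux/kernel.h>

/* Hypothetical caller sketch: propagate the error instead of printing an
 * uninitialized snapshot when the channel's ring buffer is not mapped. */
static ssize_t in_avail_show(struct hv_device *hv_dev, char *buf)
{
	struct hv_ring_buffer_debug_info dbg;
	int ret;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &dbg);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%u\n", dbg.bytes_avail_toread);
}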
@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;

return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
&outbound);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;

return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;

return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;

return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;

return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
int ret;

if (!hv_dev->channel)
return -ENODEV;
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);

ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
if (ret < 0)
return ret;

return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_open(struct inode *inode, struct file *file)
{
struct ibmvmc_file_session *session;
int rc = 0;

pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
(unsigned long)inode, (unsigned long)file,
ibmvmc.state);

session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return -ENOMEM;

session->file = file;
file->private_data = session;

return rc;
return 0;
}

/**
@@ -1187,9 +1187,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;

if (dma_setup_res->status) {
dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
dma_setup_res->status,
mei_hbm_status_str(dma_setup_res->status));
u8 status = dma_setup_res->status;

if (status == MEI_HBMS_NOT_ALLOWED) {
dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
} else {
dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
status,
mei_hbm_status_str(status));
}
dev->hbm_f_dr_supported = 0;
mei_dmam_ring_free(dev);
}
@@ -127,6 +127,8 @@
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */

#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */

#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */

#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
@@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context)
struct resource r;

if (acpi_dev_resource_io(res, &r)) {
#ifdef CONFIG_HAS_IOPORT_MAP
base = ioport_map(r.start, resource_size(&r));
return AE_OK;
#else
return AE_ERROR;
#endif
} else if (acpi_dev_resource_memory(res, &r)) {
base = ioremap(r.start, resource_size(&r));
return AE_OK;
@@ -1159,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
u32 bytes_avail_towrite;
};

void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);

int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);

/* Vmbus interface */
#define vmbus_driver_register(driver) \
@@ -4,8 +4,8 @@
*
*/

#ifndef _UAPI_LINUX_BINDER_CTL_H
#define _UAPI_LINUX_BINDER_CTL_H
#ifndef _UAPI_LINUX_BINDERFS_H
#define _UAPI_LINUX_BINDERFS_H

#include <linux/android/binder.h>
#include <linux/types.h>

@@ -22,8 +22,8 @@
*/
struct binderfs_device {
char name[BINDERFS_MAX_NAME + 1];
__u8 major;
__u8 minor;
__u32 major;
__u32 minor;
};

/**

@@ -31,5 +31,5 @@ struct binderfs_device {
*/
#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)

#endif /* _UAPI_LINUX_BINDER_CTL_H */
#endif /* _UAPI_LINUX_BINDERFS_H */