2005-07-08 08:57:13 +08:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2005 Topspin Communications. All rights reserved.
|
2006-01-31 06:29:21 +08:00
|
|
|
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
|
2005-08-11 14:03:10 +08:00
|
|
|
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
|
|
|
|
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
|
2005-10-15 06:26:04 +08:00
|
|
|
* Copyright (c) 2005 PathScale, Inc. All rights reserved.
|
2005-07-08 08:57:13 +08:00
|
|
|
*
|
|
|
|
* This software is available to you under a choice of one of two
|
|
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
|
|
* General Public License (GPL) Version 2, available from the file
|
|
|
|
* COPYING in the main directory of this source tree, or the
|
|
|
|
* OpenIB.org BSD license below:
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or
|
|
|
|
* without modification, are permitted provided that the following
|
|
|
|
* conditions are met:
|
|
|
|
*
|
|
|
|
* - Redistributions of source code must retain the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer.
|
|
|
|
*
|
|
|
|
* - Redistributions in binary form must reproduce the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer in the documentation and/or other materials
|
|
|
|
* provided with the distribution.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/poll.h>
|
2009-10-04 20:11:37 +08:00
|
|
|
#include <linux/sched.h>
|
2005-07-08 08:57:13 +08:00
|
|
|
#include <linux/file.h>
|
2005-10-29 06:38:26 +08:00
|
|
|
#include <linux/cdev.h>
|
2010-02-25 08:51:20 +08:00
|
|
|
#include <linux/anon_inodes.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2018-09-17 01:43:08 +08:00
|
|
|
#include <linux/sched/mm.h>
|
2005-07-08 08:57:13 +08:00
|
|
|
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-07-08 08:57:13 +08:00
|
|
|
|
2016-04-11 09:13:13 +08:00
|
|
|
#include <rdma/ib.h>
|
2017-08-03 21:07:06 +08:00
|
|
|
#include <rdma/uverbs_std_types.h>
|
2016-04-11 09:13:13 +08:00
|
|
|
|
2005-07-08 08:57:13 +08:00
|
|
|
#include "uverbs.h"
|
2017-01-10 08:02:14 +08:00
|
|
|
#include "core_priv.h"
|
2017-04-04 18:31:44 +08:00
|
|
|
#include "rdma_core.h"
|
2005-07-08 08:57:13 +08:00
|
|
|
|
|
|
|
MODULE_AUTHOR("Roland Dreier");
|
|
|
|
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
|
|
|
|
MODULE_LICENSE("Dual BSD/GPL");
|
|
|
|
|
|
|
|
enum {
|
|
|
|
IB_UVERBS_MAJOR = 231,
|
|
|
|
IB_UVERBS_BASE_MINOR = 192,
|
2018-01-08 18:15:38 +08:00
|
|
|
IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
|
|
|
|
IB_UVERBS_NUM_FIXED_MINOR = 32,
|
|
|
|
IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
|
2005-07-08 08:57:13 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
|
|
|
|
|
2018-01-08 18:15:38 +08:00
|
|
|
static dev_t dynamic_uverbs_dev;
|
2005-10-29 06:38:26 +08:00
|
|
|
static struct class *uverbs_class;
|
|
|
|
|
2018-10-02 16:13:29 +08:00
|
|
|
static DEFINE_IDA(uverbs_ida);
|
2005-07-08 08:57:13 +08:00
|
|
|
static void ib_uverbs_add_one(struct ib_device *device);
|
2015-07-30 22:50:14 +08:00
|
|
|
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
|
2005-07-08 08:57:13 +08:00
|
|
|
|
2018-07-11 03:43:06 +08:00
|
|
|
/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 *
 * Returns the ucontext associated with @ufile, ERR_PTR(-EIO) if the
 * underlying device has been disassociated, or ERR_PTR(-EINVAL) if no
 * ucontext has been created yet.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
	/*
	 * We do not hold the hw_destroy_rwsem lock for this flow, instead
	 * srcu is used. It does not matter if someone races this with
	 * get_context, we get NULL or valid ucontext.
	 */
	struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

	/* A NULL ib_dev under SRCU means the device was hot-unplugged. */
	if (!srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu))
		return ERR_PTR(-EIO);

	if (!ucontext)
		return ERR_PTR(-EINVAL);

	return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
|
2018-06-17 17:59:59 +08:00
|
|
|
|
2015-12-24 02:12:48 +08:00
|
|
|
/*
 * Destroy a memory window via the driver's dealloc_mw op and, on success,
 * drop the reference the MW held on its protection domain.
 * Returns the driver's status code; the PD refcount is untouched on failure.
 */
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;	/* save before mw may be freed by the driver */
	int ret;

	ret = mw->device->ops.dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return ret;
}
|
|
|
|
|
2018-09-05 14:47:59 +08:00
|
|
|
/*
 * Final release callback for the uverbs device embedded struct device.
 * Runs when the last reference on dev->dev is dropped; tears down the
 * uapi description and the SRCU state before freeing the structure.
 */
static void ib_uverbs_release_dev(struct device *device)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);

	uverbs_destroy_api(dev->uapi);
	/* cleanup_srcu_struct() must precede kfree of the containing object */
	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}
|
|
|
|
|
2017-04-04 18:31:47 +08:00
|
|
|
/* kref release: free the async event file once its last reference is gone. */
static void ib_uverbs_release_async_event_file(struct kref *ref)
{
	struct ib_uverbs_async_event_file *file =
		container_of(ref, struct ib_uverbs_async_event_file, ref);

	kfree(file);
}
|
|
|
|
|
2005-10-29 06:38:26 +08:00
|
|
|
/*
 * Release a userspace CQ object: discard any completion events still queued
 * on its completion channel (@ev_file, may be NULL if none was attached) and
 * any async events queued on the file's async channel, then drop the
 * reference held on the completion event file.
 */
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		/* Drain pending completion events belonging to this CQ. */
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj);
	}

	/* Drain pending async events belonging to this CQ. */
	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}
|
|
|
|
|
|
|
|
/*
 * Release a generic uevent-bearing object (QP/SRQ/WQ): free every async
 * event still queued for it on the file's async event channel.
 */
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}
|
|
|
|
|
2017-04-04 18:31:43 +08:00
|
|
|
/*
 * Detach the QP from every multicast group userspace attached it to,
 * freeing the bookkeeping entries as we go. Called during QP destroy.
 */
void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}
|
|
|
|
|
2015-08-13 23:32:03 +08:00
|
|
|
/* Signal that the last file reference on the uverbs device is gone. */
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}
|
|
|
|
|
2017-04-04 18:31:46 +08:00
|
|
|
/*
 * kref release for an open uverbs file. Drops the module reference taken
 * at open time (only when the device does not support disassociation),
 * signals the device when this was its last file, releases the async event
 * file reference, and frees all per-file state.
 */
void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	release_ufile_idr_uobject(file);

	/* ib_dev may already be NULL if the device was disassociated. */
	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	if (file->async_file)
		kref_put(&file->async_file->ref,
			 ib_uverbs_release_async_event_file);
	put_device(&file->device->dev);

	/* Zero page handed out for mmap after disassociation, if any. */
	if (file->disassociate_page)
		__free_pages(file->disassociate_page, 0);
	kfree(file);
}
|
|
|
|
|
2017-04-18 17:03:42 +08:00
|
|
|
/*
 * Common read path for both async and completion event files. Blocks
 * (unless O_NONBLOCK) until an event of fixed size @eventsz is available,
 * copies it to userspace, and bumps the per-object delivered-event counter.
 *
 * Returns @eventsz on success, -EAGAIN for an empty non-blocking read,
 * -ERESTARTSYS if interrupted, -EIO if the device was disassociated with
 * no event pending, -EINVAL if the user buffer is too small, or -EFAULT
 * on a failed copy_to_user().
 */
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct ib_uverbs_file *uverbs_file,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	int ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		/* Must drop the lock before sleeping or returning. */
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarentee this will see the null set
			 * without using RCU
			 */
					     !uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If device was disassociated and no event exists set an error */
		if (list_empty(&ev_queue->event_list) &&
		    !uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		/* Leave the event queued; the caller's buffer is too small. */
		ret = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			/* Account delivery and unlink from the object's list. */
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	/* kfree(NULL) is a no-op on the -EINVAL path. */
	kfree(event);

	return ret;
}
|
|
|
|
|
2017-04-04 18:31:47 +08:00
|
|
|
/* read() for the async event fd: delivers fixed-size async event descriptors. */
static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}
|
|
|
|
|
|
|
|
/* read() for a completion channel fd: delivers one completion descriptor. */
static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *ev_file = filp->private_data;
	const size_t desc_size = sizeof(struct ib_uverbs_comp_event_desc);

	return ib_uverbs_event_read(&ev_file->ev_queue, ev_file->uobj.ufile,
				    filp, buf, count, pos, desc_size);
}
|
|
|
|
|
2017-07-03 18:39:46 +08:00
|
|
|
/*
 * Common poll path for both event fd types. Registers on the queue's
 * waitqueue and reports EPOLLIN | EPOLLRDNORM when events are pending.
 */
static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
				     struct file *filp,
				     struct poll_table_struct *wait)
{
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}
|
|
|
|
|
2017-07-03 18:39:46 +08:00
|
|
|
/*
 * poll() for the async event fd.
 * NOTE(review): private_data is passed directly as the event queue pointer;
 * this relies on ev_queue being the first member of
 * struct ib_uverbs_async_event_file -- confirm against the header.
 */
static __poll_t ib_uverbs_async_event_poll(struct file *filp,
					   struct poll_table_struct *wait)
{
	return ib_uverbs_event_poll(filp->private_data, filp, wait);
}
|
|
|
|
|
2017-07-03 18:39:46 +08:00
|
|
|
static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
|
2017-04-04 18:31:47 +08:00
|
|
|
struct poll_table_struct *wait)
|
|
|
|
{
|
|
|
|
struct ib_uverbs_completion_event_file *comp_ev_file =
|
|
|
|
filp->private_data;
|
|
|
|
|
2017-04-18 17:03:42 +08:00
|
|
|
return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
|
2017-04-04 18:31:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * fasync() for the async event fd: wire SIGIO delivery to the queue.
 * NOTE(review): treats private_data as the queue itself; assumes ev_queue
 * is the first member of struct ib_uverbs_async_event_file -- confirm.
 */
static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_queue *ev_queue = filp->private_data;

	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}
|
|
|
|
|
2017-04-04 18:31:47 +08:00
|
|
|
static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
|
2005-07-08 08:57:13 +08:00
|
|
|
{
|
2017-04-04 18:31:47 +08:00
|
|
|
struct ib_uverbs_completion_event_file *comp_ev_file =
|
|
|
|
filp->private_data;
|
|
|
|
|
2017-04-18 17:03:42 +08:00
|
|
|
return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
|
2017-04-04 18:31:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * release() for the async event fd. Marks the queue closed, frees all
 * queued events, and -- if this is the first close (disassociation may
 * have closed it already) -- unlinks the file from the device list and
 * unregisters the IB event handler. Finally drops the references on the
 * owning uverbs file and on this event file.
 */
static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;
	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	/* lists_mutex serializes against device disassociation. */
	mutex_lock(&uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->ev_queue.lock);
	closed_already = file->ev_queue.is_closed;
	file->ev_queue.is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);
	if (!closed_already) {
		list_del(&file->list);
		ib_unregister_event_handler(&uverbs_file->event_handler);
	}
	mutex_unlock(&uverbs_file->device->lists_mutex);

	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_async_event_file);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * release() for a completion channel fd. Frees all still-queued events,
 * marks the queue closed so the comp handler stops enqueueing, and hands
 * the fd-uobject back to the uverbs object machinery.
 */
static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct ib_uverbs_completion_event_file *file = container_of(
		uobj, struct ib_uverbs_completion_event_file, uobj);
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	file->ev_queue.is_closed = 1;
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

	return 0;
}
|
|
|
|
|
2017-04-04 18:31:47 +08:00
|
|
|
/* File operations for completion channel fds handed out to userspace. */
const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_comp_event_read,
	.poll    = ib_uverbs_comp_event_poll,
	.release = ib_uverbs_comp_event_close,
	.fasync  = ib_uverbs_comp_event_fasync,
	.llseek	 = no_llseek,
};
|
|
|
|
|
|
|
|
/* File operations for the per-context async event fd. */
static const struct file_operations uverbs_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_async_event_read,
	.poll    = ib_uverbs_async_event_poll,
	.release = ib_uverbs_async_event_close,
	.fasync  = ib_uverbs_async_event_fasync,
	.llseek	 = no_llseek,
};
|
|
|
|
|
|
|
|
/*
 * CQ completion callback (may run in interrupt context). Queues a
 * completion event on the channel and wakes readers. Drops the event
 * silently if the channel is closed or allocation fails -- userspace
 * will still observe completions by polling the CQ.
 */
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue *ev_queue = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	/* GFP_ATOMIC: called with the queue lock held, possibly in IRQ. */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
|
|
|
|
|
|
|
|
/*
 * Queue an async event (@element/@event pair) on the file's async event
 * channel and wake readers. @obj_list/@counter, when non-NULL, link the
 * event to its originating object so delivery can be accounted and the
 * event discarded if the object is destroyed first. Events are dropped
 * silently if the channel is closed or allocation fails.
 */
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
	if (file->async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	/* GFP_ATOMIC: we hold the queue lock and may be in IRQ context. */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter               = counter;

	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
|
|
|
|
|
|
|
|
/* Async event callback for CQs: forward to the owning file's async queue. */
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}
|
|
|
|
|
|
|
|
/*
 * Async event callback for QPs. XRC target QPs share a physical QP and may
 * be added to the sharing list before their uobject is assigned, so an
 * event racing with ib_uverbs_open_qp could see a NULL uobject -- skip it.
 */
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
|
|
|
|
|
2016-05-23 20:20:49 +08:00
|
|
|
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
|
|
|
|
{
|
|
|
|
struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
|
|
|
|
struct ib_uevent_object, uobject);
|
|
|
|
|
|
|
|
ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
|
|
|
|
event->event, &uobj->event_list,
|
|
|
|
&uobj->events_reported);
|
|
|
|
}
|
|
|
|
|
2005-08-19 03:24:13 +08:00
|
|
|
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
|
|
|
|
{
|
2005-09-10 06:55:08 +08:00
|
|
|
struct ib_uevent_object *uobj;
|
|
|
|
|
|
|
|
uobj = container_of(event->element.srq->uobject,
|
|
|
|
struct ib_uevent_object, uobject);
|
|
|
|
|
|
|
|
ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
|
|
|
|
event->event, &uobj->event_list,
|
|
|
|
&uobj->events_reported);
|
2005-08-19 03:24:13 +08:00
|
|
|
}
|
|
|
|
|
2005-09-27 04:53:25 +08:00
|
|
|
/*
 * Device-level IB event callback (port events etc.): queue on the file's
 * async channel with no per-object list or delivery counter.
 */
void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}
|
|
|
|
|
2015-08-13 23:32:02 +08:00
|
|
|
/* Drop the file's reference on its async event file and clear the pointer. */
void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
	file->async_file = NULL;
}
|
|
|
|
|
2017-04-18 17:03:42 +08:00
|
|
|
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
|
2005-07-08 08:57:13 +08:00
|
|
|
{
|
2017-04-18 17:03:42 +08:00
|
|
|
spin_lock_init(&ev_queue->lock);
|
|
|
|
INIT_LIST_HEAD(&ev_queue->event_list);
|
|
|
|
init_waitqueue_head(&ev_queue->poll_wait);
|
|
|
|
ev_queue->is_closed = 0;
|
|
|
|
ev_queue->async_queue = NULL;
|
2017-04-04 18:31:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create the async event file for a uverbs file: allocate it, back it with
 * an anonymous inode, link it on the device's event-file list, install it
 * as ufile->async_file, and register the device-level IB event handler.
 *
 * Returns the new struct file, or an ERR_PTR from anon_inode_getfile()
 * on failure (references taken earlier are dropped on that path).
 */
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
					      struct ib_device	*ib_dev)
{
	struct ib_uverbs_async_event_file *ev_file;
	struct file *filp;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	ib_uverbs_init_event_queue(&ev_file->ev_queue);
	ev_file->uverbs_file = uverbs_file;
	/* The event file holds a reference on the uverbs file until close. */
	kref_get(&ev_file->uverbs_file->ref);
	kref_init(&ev_file->ref);
	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	/* Only one async event file may exist per uverbs file. */
	WARN_ON(uverbs_file->async_file);
	uverbs_file->async_file = ev_file;
	kref_get(&uverbs_file->async_file->ref);
	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&uverbs_file->event_handler);
	/* At that point async file stuff was fully set */

	return filp;

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
	return filp;
}
|
|
|
|
|
2018-02-22 00:12:41 +08:00
|
|
|
/*
 * Validate the write() command header(s) against the total byte count and
 * the per-method size expectations before dispatching to a handler.
 *
 * Extended commands: in_words/out_words are in 8-byte units and exclude
 * both headers; a response pointer, when present, must be writable and
 * large enough for core + provider output.
 *
 * Legacy commands: in_words is in 4-byte units and includes the 8-byte
 * command header itself.
 *
 * Returns 0 on success, -EINVAL for malformed input, -ENOSPC when the
 * userspace buffers are too small for this method, -EFAULT when the
 * response buffer is not accessible.
 */
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
			  struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
			  const struct uverbs_api_write_method *method_elm)
{
	if (method_elm->is_ex) {
		count -= sizeof(*hdr) + sizeof(*ex_hdr);

		if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
			return -EINVAL;

		if (hdr->in_words * 8 < method_elm->req_size)
			return -ENOSPC;

		if (ex_hdr->cmd_hdr_reserved)
			return -EINVAL;

		if (ex_hdr->response) {
			if (!hdr->out_words && !ex_hdr->provider_out_words)
				return -EINVAL;

			if (hdr->out_words * 8 < method_elm->resp_size)
				return -ENOSPC;

			if (!access_ok(u64_to_user_ptr(ex_hdr->response),
				       (hdr->out_words + ex_hdr->provider_out_words) * 8))
				return -EFAULT;
		} else {
			if (hdr->out_words || ex_hdr->provider_out_words)
				return -EINVAL;
		}

		return 0;
	}

	/* not extended command */
	if (hdr->in_words * 4 != count)
		return -EINVAL;

	/*
	 * req_size excludes the 8-byte command header, so add it back when
	 * comparing against the total byte count.  Use sizeof(*hdr) (the
	 * header struct), not sizeof(hdr) (a pointer): they happen to agree
	 * on 64-bit but the pointer is only 4 bytes on 32-bit kernels,
	 * which would make this check too lax there.
	 */
	if (count < method_elm->req_size + sizeof(*hdr)) {
		/*
		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
		 * with a 16 byte write instead of 24. Old kernels didn't
		 * check the size so they allowed this. Now that the size is
		 * checked provide a compatibility work around to not break
		 * those userspaces.
		 */
		if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
		    count == 16) {
			hdr->in_words = 6;
			return 0;
		}
		return -ENOSPC;
	}
	if (hdr->out_words * 4 < method_elm->resp_size)
		return -ENOSPC;

	return 0;
}
|
|
|
|
|
2005-07-08 08:57:13 +08:00
|
|
|
/*
 * write() entry point for the legacy/extended uverbs command ABI.
 *
 * Parses the fixed command header (and, for extended commands, the
 * extended header), validates sizes via verify_hdr(), builds the attribute
 * bundle's ucore/driver_udata buffers, and dispatches to the method
 * handler under the device's disassociate SRCU read lock.
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	const struct uverbs_api_write_method *method_elm;
	struct uverbs_api *uapi = file->device->uapi;
	struct ib_uverbs_ex_cmd_hdr ex_hdr;
	struct ib_uverbs_cmd_hdr hdr;
	struct uverbs_attr_bundle bundle;
	int srcu_key;
	ssize_t ret;

	/* Reject writes from a different security context than open() */
	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	method_elm = uapi_get_method(uapi, hdr.command);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	if (method_elm->is_ex) {
		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
			return -EINVAL;
		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
			return -EFAULT;
	}

	/* ex_hdr is only read by verify_hdr()/below when is_ex is set */
	ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
	if (ret)
		return ret;

	/* Hold off device disassociation while the handler runs */
	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

	buf += sizeof(hdr);

	memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
	bundle.ufile = file;
	bundle.context = NULL; /* only valid if bundle has uobject */
	if (!method_elm->is_ex) {
		/* Legacy command: word counts are 4-byte units, in_words
		 * includes the header we already consumed. */
		size_t in_len = hdr.in_words * 4 - sizeof(hdr);
		size_t out_len = hdr.out_words * 4;
		u64 response = 0;

		if (method_elm->has_udata) {
			/* Bytes beyond the fixed request struct are the
			 * driver-private (udata) input. */
			bundle.driver_udata.inlen =
				in_len - method_elm->req_size;
			in_len = method_elm->req_size;
			if (bundle.driver_udata.inlen)
				bundle.driver_udata.inbuf = buf + in_len;
			else
				bundle.driver_udata.inbuf = NULL;
		} else {
			memset(&bundle.driver_udata, 0,
			       sizeof(bundle.driver_udata));
		}

		if (method_elm->has_resp) {
			/*
			 * The macros check that if has_resp is set
			 * then the command request structure starts
			 * with a '__aligned u64 response' member.
			 */
			/* NOTE(review): cast lacks a __user annotation
			 * (sparse); buf is a __user pointer - confirm. */
			ret = get_user(response, (const u64 *)buf);
			if (ret)
				goto out_unlock;

			if (method_elm->has_udata) {
				/* Driver-private output follows the fixed
				 * response struct in the user buffer. */
				bundle.driver_udata.outlen =
					out_len - method_elm->resp_size;
				out_len = method_elm->resp_size;
				if (bundle.driver_udata.outlen)
					bundle.driver_udata.outbuf =
						u64_to_user_ptr(response +
								out_len);
				else
					bundle.driver_udata.outbuf = NULL;
			}
		} else {
			bundle.driver_udata.outlen = 0;
			bundle.driver_udata.outbuf = NULL;
		}

		ib_uverbs_init_udata_buf_or_null(
			&bundle.ucore, buf, u64_to_user_ptr(response),
			in_len, out_len);
	} else {
		/* Extended command: word counts are 8-byte units and the
		 * core/provider regions are laid out back to back. */
		buf += sizeof(ex_hdr);

		ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(
			&bundle.driver_udata, buf + bundle.ucore.inlen,
			u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
			ex_hdr.provider_in_words * 8,
			ex_hdr.provider_out_words * 8);

	}

	ret = method_elm->handler(&bundle);
out_unlock:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return (ret) ? : count;
}
|
|
|
|
|
|
|
|
/*
 * mmap() entry point: forward the mapping request to the device driver's
 * mmap op, under the disassociate SRCU read lock so the ucontext cannot
 * be torn down by hot-unplug while the driver is setting up the mapping.
 */
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_ucontext *ucontext;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		/* No ucontext (e.g. device disassociated) - fail the map */
		ret = PTR_ERR(ucontext);
		goto out;
	}

	ret = ucontext->device->ops.mmap(ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}
|
|
|
|
|
2018-09-17 01:43:08 +08:00
|
|
|
/*
|
|
|
|
* Each time we map IO memory into user space this keeps track of the mapping.
|
|
|
|
* When the device is hot-unplugged we 'zap' the mmaps in user space to point
|
|
|
|
* to the zero page and allow the hot unplug to proceed.
|
|
|
|
*
|
|
|
|
* This is necessary for cases like PCI physical hot unplug as the actual BAR
|
|
|
|
* memory may vanish after this and access to it from userspace could MCE.
|
|
|
|
*
|
|
|
|
* RDMA drivers supporting disassociation must have their user space designed
|
|
|
|
* to cope in some way with their IO pages going to the zero page.
|
|
|
|
*/
|
|
|
|
/* Per-VMA tracking entry; stored in vma->vm_private_data. */
struct rdma_umap_priv {
	/* The user VMA this entry tracks */
	struct vm_area_struct *vma;
	/* Entry on the owning ib_uverbs_file's umaps list */
	struct list_head list;
};
|
|
|
|
|
|
|
|
static const struct vm_operations_struct rdma_umap_ops;
|
|
|
|
|
|
|
|
/*
 * Attach a tracking entry to @vma and register it on the owning uverbs
 * file's umaps list so disassociation can later zap the mapping.
 */
static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
				struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	/* Wire up the VMA before publishing priv on the list */
	priv->vma = vma;
	vma->vm_private_data = priv;
	vma->vm_ops = &rdma_umap_ops;

	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The VMA has been dup'd, initialize the vm_private_data with a new tracking
|
|
|
|
* struct
|
|
|
|
*/
|
|
|
|
/*
 * vm_ops->open: called when a tracked VMA is duplicated (e.g. fork).
 * Allocate a fresh tracking struct for the new VMA; if that is not
 * possible (racing with disassociation, already disassociated, or OOM)
 * the new VMA must not keep the IO pages, so it is zapped instead.
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *opriv = vma->vm_private_data;
	struct rdma_umap_priv *priv;

	/* Parent VMA was already zapped/untracked - nothing to track */
	if (!opriv)
		return;

	/* We are racing with disassociation */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		goto out_zap;
	/*
	 * Disassociation already completed, the VMA should already be zapped.
	 */
	if (!ufile->ucontext)
		goto out_unlock;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_unlock;
	rdma_umap_priv_init(priv, vma);

	up_read(&ufile->hw_destroy_rwsem);
	return;

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
out_zap:
	/*
	 * We can't allow the VMA to be created with the actual IO pages, that
	 * would break our API contract, and it can't be stopped at this
	 * point, so zap it.
	 */
	vma->vm_private_data = NULL;
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
|
|
|
|
|
|
|
|
static void rdma_umap_close(struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
struct ib_uverbs_file *ufile = vma->vm_file->private_data;
|
|
|
|
struct rdma_umap_priv *priv = vma->vm_private_data;
|
|
|
|
|
|
|
|
if (!priv)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The vma holds a reference on the struct file that created it, which
|
|
|
|
* in turn means that the ib_uverbs_file is guaranteed to exist at
|
|
|
|
* this point.
|
|
|
|
*/
|
|
|
|
mutex_lock(&ufile->umap_lock);
|
|
|
|
list_del(&priv->list);
|
|
|
|
mutex_unlock(&ufile->umap_lock);
|
|
|
|
kfree(priv);
|
|
|
|
}
|
|
|
|
|
2019-04-16 19:07:28 +08:00
|
|
|
/*
|
|
|
|
* Once the zap_vma_ptes has been called touches to the VMA will come here and
|
|
|
|
* we return a dummy writable zero page for all the pfns.
|
|
|
|
*/
|
|
|
|
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
|
|
|
|
{
|
|
|
|
struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
|
|
|
|
struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
|
|
|
|
vm_fault_t ret = 0;
|
|
|
|
|
|
|
|
if (!priv)
|
|
|
|
return VM_FAULT_SIGBUS;
|
|
|
|
|
|
|
|
/* Read only pages can just use the system zero page. */
|
|
|
|
if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
|
|
|
|
vmf->page = ZERO_PAGE(vmf->vm_start);
|
|
|
|
get_page(vmf->page);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_lock(&ufile->umap_lock);
|
|
|
|
if (!ufile->disassociate_page)
|
|
|
|
ufile->disassociate_page =
|
|
|
|
alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
|
|
|
|
|
|
|
|
if (ufile->disassociate_page) {
|
|
|
|
/*
|
|
|
|
* This VMA is forced to always be shared so this doesn't have
|
|
|
|
* to worry about COW.
|
|
|
|
*/
|
|
|
|
vmf->page = ufile->disassociate_page;
|
|
|
|
get_page(vmf->page);
|
|
|
|
} else {
|
|
|
|
ret = VM_FAULT_SIGBUS;
|
|
|
|
}
|
|
|
|
mutex_unlock(&ufile->umap_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-09-17 01:43:08 +08:00
|
|
|
/* VMA callbacks for uverbs-tracked IO mappings */
static const struct vm_operations_struct rdma_umap_ops = {
	.open = rdma_umap_open,   /* VMA duplicated (fork/split) */
	.close = rdma_umap_close, /* VMA torn down */
	.fault = rdma_umap_fault, /* touch after zap -> dummy zero page */
};
|
|
|
|
|
|
|
|
/*
 * Common validation + tracking allocation for rdma_user_mmap_io/page.
 * Returns a zeroed rdma_umap_priv on success (caller must finish with
 * rdma_umap_priv_init() or kfree() it on mapping failure), or an
 * ERR_PTR on invalid VMA / size mismatch / allocation failure.
 */
static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
						 struct vm_area_struct *vma,
						 unsigned long size)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	/* Mappings must be MAP_SHARED: the fault path relies on no COW */
	if (!(vma->vm_flags & VM_SHARED))
		return ERR_PTR(-EINVAL);

	/* Userspace must mmap exactly the size the driver expects */
	if (vma->vm_end - vma->vm_start != size)
		return ERR_PTR(-EINVAL);

	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return ERR_PTR(-EINVAL);
	/* Caller must be inside the disassociate SRCU read section */
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	return priv;
}
|
|
|
|
|
|
|
|
/*
 * Map IO memory into a process. This is to be called by drivers as part of
 * their mmap() functions if they wish to send something like PCI-E BAR memory
 * to userspace.
 *
 * Returns 0 on success or a negative errno (-EINVAL for a bad VMA,
 * -ENOMEM if tracking allocation fails, -EAGAIN if the remap fails).
 */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	/* Only start tracking once the PTEs are actually installed */
	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);
|
|
|
|
|
|
|
|
/*
 * The page case is here for a slightly different reason, the driver expects
 * to be able to free the page it is sharing to user space when it destroys
 * its ucontext, which means we need to zap the user space references.
 *
 * We could handle this differently by providing an API to allocate a shared
 * page and then only freeing the shared page when the last ufile is
 * destroyed.
 *
 * Returns 0 on success or a negative errno (see rdma_user_mmap_io()).
 */
int rdma_user_mmap_page(struct ib_ucontext *ucontext,
			struct vm_area_struct *vma, struct page *page,
			unsigned long size)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	/* Normal kernel page, so remap_pfn_range (not io_remap_pfn_range) */
	if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
			    vma->vm_page_prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_page);
|
|
|
|
|
|
|
|
/*
 * Zap every user mapping tracked on this ufile so hot-unplug can proceed.
 * Works one mm_struct at a time: umap_lock nests under mmap_sem (it is
 * taken inside the vm_ops callbacks), so we must drop umap_lock, take the
 * target mm's mmap_sem, then re-take umap_lock before walking the list.
 */
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
	struct rdma_umap_priv *priv, *next_priv;

	/* Caller holds hw_destroy_rwsem, excluding rdma_umap_open() */
	lockdep_assert_held(&ufile->hw_destroy_rwsem);

	while (1) {
		struct mm_struct *mm = NULL;

		/* Get an arbitrary mm pointer that hasn't been cleaned yet */
		mutex_lock(&ufile->umap_lock);
		while (!list_empty(&ufile->umaps)) {
			int ret;

			priv = list_first_entry(&ufile->umaps,
						struct rdma_umap_priv, list);
			mm = priv->vma->vm_mm;
			ret = mmget_not_zero(mm);
			if (!ret) {
				/* mm already exiting; nothing left to zap */
				list_del_init(&priv->list);
				mm = NULL;
				continue;
			}
			break;
		}
		mutex_unlock(&ufile->umap_lock);
		if (!mm)
			return;

		/*
		 * The umap_lock is nested under mmap_sem since it used within
		 * the vma_ops callbacks, so we have to clean the list one mm
		 * at a time to get the lock ordering right. Typically there
		 * will only be one mm, so no big deal.
		 */
		down_read(&mm->mmap_sem);
		if (!mmget_still_valid(mm))
			goto skip_mm;
		mutex_lock(&ufile->umap_lock);
		/* Zap every tracked VMA belonging to this mm */
		list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
					  list) {
			struct vm_area_struct *vma = priv->vma;

			if (vma->vm_mm != mm)
				continue;
			list_del_init(&priv->list);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
		}
		mutex_unlock(&ufile->umap_lock);
	skip_mm:
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
|
|
|
|
|
2008-06-28 05:43:20 +08:00
|
|
|
/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately run -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	/* refcount already dropped to zero means device is going away */
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	get_device(&dev->dev);
	/* SRCU read side pins dev->ib_dev against disassociation */
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		/* Already disassociated from the HW device */
		ret = -EIO;
		goto err;
	}

	/* In case IB device supports disassociate ucontext, there is no hard
	 * dependency between uverbs device and its low level device.
	 */
	module_dependent = !(ib_dev->ops.disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	kref_init(&file->ref);
	mutex_init(&file->ucontext_lock);

	spin_lock_init(&file->uobjects_lock);
	INIT_LIST_HEAD(&file->uobjects);
	init_rwsem(&file->hw_destroy_rwsem);
	mutex_init(&file->umap_lock);
	INIT_LIST_HEAD(&file->umaps);

	filp->private_data = file;
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	setup_ufile_idr_uobject(file);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	/* Drop the reference taken above; complete waiter if we were last */
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	put_device(&dev->dev);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Release method for the uverbs char device: destroy all HW objects the
 * file created, unlink it from the device's open-files list, and drop
 * the file reference (freeing it if this was the last one).
 */
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

	mutex_lock(&file->device->lists_mutex);
	/* list_del_init so a concurrent disassociate sees an empty entry */
	list_del_init(&file->list);
	mutex_unlock(&file->device->lists_mutex);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}
|
|
|
|
|
2007-02-12 16:55:32 +08:00
|
|
|
/* fops for devices whose driver does not implement ops.mmap */
static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};
|
|
|
|
|
2007-02-12 16:55:32 +08:00
|
|
|
/* fops for devices whose driver implements ops.mmap (adds .mmap) */
static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};
|
|
|
|
|
|
|
|
/* IB core client: creates/destroys a uverbs cdev per registered ib_device */
static struct ib_client uverbs_client = {
	.name   = "uverbs",
	/* uverbs itself makes no kernel verbs calls */
	.no_kverbs_req = true,
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};
|
|
|
|
|
2018-09-05 14:48:00 +08:00
|
|
|
/*
 * sysfs 'ibdev' attribute: name of the underlying ib_device, or -ENODEV
 * if the device has been disassociated.  SRCU protects the ib_dev pointer.
 */
static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(ibdev);
|
2005-07-08 08:57:13 +08:00
|
|
|
|
2018-09-05 14:48:00 +08:00
|
|
|
/*
 * sysfs 'abi_version' attribute: the driver's uverbs ABI version, or
 * -ENODEV if the device has been disassociated.
 */
static ssize_t abi_version_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(abi_version);
|
|
|
|
|
|
|
|
/* Per-uverbs-device sysfs attributes, attached via uverbs_dev->groups */
static struct attribute *ib_dev_attrs[] = {
	&dev_attr_abi_version.attr,
	&dev_attr_ibdev.attr,
	NULL,
};

static const struct attribute_group dev_attr_group = {
	.attrs = ib_dev_attrs,
};
|
2005-09-30 05:17:48 +08:00
|
|
|
|
2010-01-05 19:48:09 +08:00
|
|
|
/* Class-wide 'abi_version' sysfs attribute: the core uverbs ABI version */
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));
|
2005-07-08 08:57:13 +08:00
|
|
|
|
2018-08-10 10:14:35 +08:00
|
|
|
static int ib_uverbs_create_uapi(struct ib_device *device,
|
|
|
|
struct ib_uverbs_device *uverbs_dev)
|
|
|
|
{
|
2018-08-10 10:14:36 +08:00
|
|
|
struct uverbs_api *uapi;
|
2018-08-10 10:14:35 +08:00
|
|
|
|
2018-11-13 04:59:52 +08:00
|
|
|
uapi = uverbs_alloc_api(device);
|
2018-08-10 10:14:43 +08:00
|
|
|
if (IS_ERR(uapi))
|
2018-08-10 10:14:36 +08:00
|
|
|
return PTR_ERR(uapi);
|
|
|
|
|
|
|
|
uverbs_dev->uapi = uapi;
|
2018-08-10 10:14:35 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-07-08 08:57:13 +08:00
|
|
|
/*
 * IB client 'add' callback: create the per-device uverbs char device.
 * Silently does nothing if the driver does not support user contexts;
 * on any failure the partially built uverbs_dev is torn down via the
 * refcount/completion/put_device sequence at the end.
 */
static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	/* No alloc_ucontext means the driver cannot serve userspace */
	if (!device->ops.alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	device_initialize(&uverbs_dev->dev);
	uverbs_dev->dev.class = uverbs_class;
	uverbs_dev->dev.parent = device->dev.parent;
	uverbs_dev->dev.release = ib_uverbs_release_dev;
	uverbs_dev->groups[0] = &dev_attr_group;
	uverbs_dev->dev.groups = uverbs_dev->groups;
	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	/* Pick a minor; fixed minors first, then the dynamic range */
	devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
			       GFP_KERNEL);
	if (devnum < 0)
		goto err;
	uverbs_dev->devnum = devnum;
	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
		base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
	else
		base = IB_UVERBS_BASE_DEV + devnum;

	if (ib_uverbs_create_uapi(device, uverbs_dev))
		goto err_uapi;

	uverbs_dev->dev.devt = base;
	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

	/* Only expose .mmap if the driver implements it */
	cdev_init(&uverbs_dev->cdev,
		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
	uverbs_dev->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
	if (ret)
		goto err_uapi;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);
	return;

err_uapi:
	ida_free(&uverbs_ida, devnum);
err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	put_device(&uverbs_dev->dev);
	return;
}
|
|
|
|
|
2015-08-13 23:32:05 +08:00
|
|
|
/*
 * Forcibly disassociate every open file and async event file from the HW
 * device: terminate in-flight commands, raise IB_EVENT_DEVICE_FATAL,
 * destroy each file's HW objects, and wake/poison the event queues.
 * After this returns, userspace sees -EIO on all further access.
 */
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_async_event_file *event_file;
	struct ib_event event;

	/* Pending running commands to terminate */
	uverbs_disassociate_api_pre(uverbs_dev);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		list_del_init(&file->list);
		/* Hold the file across the unlocked region below */
		kref_get(&file->ref);

		/* We must release the mutex before going ahead and calling
		 * uverbs_cleanup_ufile, as it might end up indirectly calling
		 * uverbs_close, for example due to freeing the resources (e.g
		 * mmput).
		 */
		mutex_unlock(&uverbs_dev->lists_mutex);

		ib_uverbs_event_handler(&file->event_handler, &event);
		uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
		kref_put(&file->ref, ib_uverbs_release_file);

		mutex_lock(&uverbs_dev->lists_mutex);
	}

	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_async_event_file,
					      list);
		/* Mark the queue closed so readers stop delivering events */
		spin_lock_irq(&event_file->ev_queue.lock);
		event_file->ev_queue.is_closed = 1;
		spin_unlock_irq(&event_file->ev_queue.lock);

		list_del(&event_file->list);
		ib_unregister_event_handler(
			&event_file->uverbs_file->event_handler);
		event_file->uverbs_file->event_handler.device =
			NULL;

		/* Wake any pollers/readers blocked on the dead queue */
		wake_up_interruptible(&event_file->ev_queue.poll_wait);
		kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);

	uverbs_disassociate_api(uverbs_dev->uapi);
}
|
|
|
|
|
2015-07-30 22:50:14 +08:00
|
|
|
/*
 * IB client 'remove' callback: delete the cdev so no new opens can occur,
 * then either disassociate live clients immediately (if the driver
 * supports it) or block until every open file has been closed.
 */
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
	ida_free(&uverbs_ida, uverbs_dev->devnum);

	if (device->ops.disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see a EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);

	put_device(&uverbs_dev->dev);
}
|
|
|
|
|
2011-07-24 08:24:48 +08:00
|
|
|
/*
 * Class devnode callback: place uverbs nodes under /dev/infiniband/ and
 * make them world read/write.  Returned string is kfree'd by the caller.
 */
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode != NULL)
		*mode = 0666;

	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
|
|
|
|
|
2005-07-08 08:57:13 +08:00
|
|
|
/*
 * Module init: reserve the fixed and dynamic char-device minor ranges,
 * create the "infiniband_verbs" class and its abi_version attribute, and
 * register with the IB core.  Error labels unwind in reverse order and
 * intentionally fall through.
 */
static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);

out:
	return ret;
}
|
|
|
|
|
|
|
|
/* Module exit: undo ib_uverbs_init() in reverse order. */
static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);
}
|
|
|
|
|
|
|
|
/* Module entry/exit points */
module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);
|