/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

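/*
 * Upper bound on scatterlist entries per chunk, chosen so that a fully
 * populated struct usnic_uiom_chunk fits within a single page.
 */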
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /	\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

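/*
 * IOMMU fault handler: nothing can be resolved at fault time, so log the
 * faulting address and report the fault as unhandled.
 */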
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

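/*
 * Unpin every page referenced by the chunk list, marking pages dirty when
 * requested, and free the chunks themselves.
 */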
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			unpin_user_pages_dirty_lock(&page, 1, dirty);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

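/*
 * Pin the user pages backing [addr, addr + size) with a long-term pin
 * (FOLL_LONGTERM), charge them against the caller's RLIMIT_MEMLOCK
 * accounting, and record them in uiomr->chunk_list as page-sized
 * scatterlist chunks. On success the owning mm is grabbed so that it
 * outlives the registration.
 */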
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!size)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	down_read(&mm->mmap_sem);

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = pin_user_pages(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof(struct page *)),
				     gup_flags | FOLL_LONGTERM,
				     page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(struct_size(chunk, page_list,
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
		atomic64_sub(npages, &current->mm->pinned_vm);
	} else
		mmgrab(uiomr->owning_mm);

	up_read(&mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

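/*
 * Tear down the IOMMU translations for each interval. Unmapping proceeds
 * one page at a time, rather than one iommu_unmap() per interval, as a
 * workaround for RH 970401.
 */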
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

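/*
 * Remove the registration's intervals from the protection domain, unmap
 * them from the IOMMU, and unpin the underlying pages. Pages are marked
 * dirty only if the mapping was writable.
 */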
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

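/*
 * Program the IOMMU for each interval. Rather than mapping page by page,
 * walk the chunk scatterlists in lockstep with the intervals and coalesce
 * runs of physically contiguous pages into a single iommu_map() call.
 */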
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
					list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

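/*
 * Register the user memory region [addr, addr + size) with the protection
 * domain: pin the pages, compute the set of page intervals not already
 * mapped, program the IOMMU for them, and track the region in the
 * domain's interval tree.
 */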
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write. This module may not unmap
	 * and then remap the entry after fixing the permission
	 * because this opens up a small window where hw DMA may page fault.
	 * Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

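/*
 * Typical caller flow (a sketch; assumes the device has already been
 * attached to pd with usnic_uiom_attach_dev_to_pd()):
 *
 *	uiomr = usnic_uiom_reg_get(pd, addr, size, 1, 0);
 *	if (IS_ERR(uiomr))
 *		return PTR_ERR(uiomr);
 *	... hardware DMA through the IOMMU mapping ...
 *	usnic_uiom_reg_release(uiomr);
 */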
static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
	__usnic_uiom_release_tail(uiomr);
}

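/*
 * Allocate a protection domain backed by a fresh IOMMU domain on the PCI
 * bus, with usnic_uiom_dma_fault() installed as its fault handler.
 */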
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

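/*
 * Attach a device to the protection domain's IOMMU domain. The attach is
 * rejected if the IOMMU cannot enforce cache coherency, since usnic maps
 * all entries with IOMMU_CACHE.
 */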
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	iommu_detach_device(pd->domain, dev);
}

struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	return 0;
}