/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi; /* Highest allocated pfn */
	unsigned long	pfn_lo; /* Lowest allocated pfn */
};
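
/*
 * Note that pfn_lo/pfn_hi form an inclusive range of IOMMU-page frames, so
 * a single-page allocation has pfn_lo == pfn_hi. Purely illustrative values:
 *
 *	pfn_lo == 0x100, pfn_hi == 0x10f  =>  iova_size() below returns 16
 */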
struct iova_rcache;

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached_node;	/* Save last alloced node */
	struct rb_node	*cached32_node; /* Save last 32-bit alloced node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	unsigned long	max32_alloc_size; /* Size of last failed allocation */
	struct iova	anchor;		/* rbtree lookup anchor */

	struct iova_rcache	*rcaches;
	struct hlist_node	cpuhp_dead;
};
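
/*
 * Rough lifecycle sketch, not lifted from any particular driver; the granule,
 * start pfn, limit and sizes below are illustrative and error handling is
 * trimmed:
 *
 *	init_iova_domain(&iovad, SZ_4K, 1);
 *	if (iova_domain_init_rcaches(&iovad))
 *		goto err;
 *
 *	pfn = alloc_iova_fast(&iovad, nr_pages,
 *			      DMA_BIT_MASK(32) >> iova_shift(&iovad), true);
 *	...
 *	free_iova_fast(&iovad, pfn, nr_pages);
 *
 *	put_iova_domain(&iovad);
 *
 * put_iova_domain() also tears down the rcaches set up by
 * iova_domain_init_rcaches().
 */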

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}
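
/*
 * Worked example for the helpers above, assuming a domain iovad with a
 * 4 KiB granule (granule == 0x1000) and an iova whose pfn_lo is 0x1234;
 * all values are illustrative:
 *
 *	iova_shift(iovad)                == 12
 *	iova_mask(iovad)                 == 0xfff
 *	iova_offset(iovad, 0x12345abc)   == 0xabc
 *	iova_align(iovad, 0x1001)        == 0x2000
 *	iova_dma_addr(iovad, iova)       == 0x1234000
 *	iova_pfn(iovad, 0x1234000)       == 0x1234
 */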
#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
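
/*
 * iova_cache_get() takes a reference on the kmem cache that backs struct
 * iova and must succeed before any iovas are allocated; iova_cache_put()
 * drops that reference on teardown. A minimal sketch (the -ENOMEM mapping
 * is the caller's choice, not mandated here):
 *
 *	if (iova_cache_get())
 *		return -ENOMEM;
 *	...
 *	iova_cache_put();
 */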

unsigned long iova_rcache_range(void);

void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
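
/*
 * Sketch only; iovad, nr_pages and the 32-bit limit are illustrative.
 * alloc_iova() returns a range capped at limit_pfn (aligned to its size
 * because size_aligned is passed as true), which __free_iova() releases:
 *
 *	struct iova *new_iova = alloc_iova(&iovad, nr_pages,
 *					   DMA_BIT_MASK(32) >> iova_shift(&iovad),
 *					   true);
 *	if (new_iova)
 *		__free_iova(&iovad, new_iova);
 */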
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
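
/*
 * Illustrative only: reserve_iova() pins a pfn range in the rbtree so the
 * allocator never hands it out, e.g. an MMIO window that must not be used
 * for DMA. The addresses below are made-up examples:
 *
 *	reserve_iova(&iovad, iova_pfn(&iovad, 0xfee00000),
 *		     iova_pfn(&iovad, 0xfeefffff));
 */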
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn);
int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

#endif
#endif