dma-mapping fixes for Linux 6.0
Merge tag 'dma-mapping-6.0-2022-09-10' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - revert a panic on swiotlb initialization failure (Yu Zhao)

 - fix the lookup for partial syncs in dma-debug (Robin Murphy)

 - fix a shift overflow in swiotlb (Chao Gao)

 - fix a comment typo in swiotlb (Chao Gao)

 - mark a function static now that all abusers are gone (Christoph Hellwig)

* tag 'dma-mapping-6.0-2022-09-10' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: mark dma_supported static
  swiotlb: fix a typo
  swiotlb: avoid potential left shift overflow
  dma-debug: improve search for partial syncs
  Revert "swiotlb: panic if nslabs is too small"
commit 16547b21b1
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -139,7 +139,6 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
 bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
 bool dma_pci_p2pdma_supported(struct device *dev);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
@@ -248,10 +247,6 @@ static inline bool dma_can_mmap(struct device *dev)
 {
 	return false;
 }
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	return 0;
-}
 static inline bool dma_pci_p2pdma_supported(struct device *dev)
 {
 	return false;
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -350,11 +350,10 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 						   struct dma_debug_entry *ref,
 						   unsigned long *flags)
 {
-	unsigned int max_range = dma_get_max_seg_size(ref->dev);
 	struct dma_debug_entry *entry, index = *ref;
-	unsigned int range = 0;
+	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
 
-	while (range <= max_range) {
+	for (int i = 0; i < limit; i++) {
 		entry = __hash_bucket_find(*bucket, ref, containing_match);
 
 		if (entry)
@@ -364,7 +363,6 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 		 * Nothing found, go back a hash bucket
 		 */
 		put_hash_bucket(*bucket, *flags);
-		range += (1 << HASH_FN_SHIFT);
 		index.dev_addr -= (1 << HASH_FN_SHIFT);
 		*bucket = get_hash_bucket(&index, flags);
 	}
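The rationale for the new bound: a mapping that contains dev_addr can only hash to dev_addr's own bucket or an earlier one, so walking back more than (dev_addr >> HASH_FN_SHIFT) + 1 buckets (or more than the whole table) can never find anything. The old loop instead walked back by the device's max segment size, which both overshoots when that size is large and misses mappings bigger than one segment. A minimal standalone sketch of the bound; the constants here are illustrative stand-ins for the real HASH_SIZE/HASH_FN_SHIFT in kernel/dma/debug.c:

    /*
     * Standalone sketch (userspace C, not kernel code).  HASH_SIZE and
     * HASH_FN_SHIFT are illustrative stand-ins for the constants defined
     * in kernel/dma/debug.c.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define HASH_SIZE     16384ULL
    #define HASH_FN_SHIFT 13

    static uint64_t search_bound(uint64_t dev_addr)
    {
        /* Bucket index of dev_addr itself, plus one to include bucket 0. */
        uint64_t limit = (dev_addr >> HASH_FN_SHIFT) + 1;

        /* Never walk back further than the table has buckets. */
        return limit < HASH_SIZE ? limit : HASH_SIZE;
    }

    int main(void)
    {
        /* 0x5000 hashes to bucket 2, so buckets 2, 1, 0 suffice. */
        printf("buckets searched: %llu\n",
               (unsigned long long)search_bound(0x5000));
        return 0;
    }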
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -707,7 +707,7 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
 
-int dma_supported(struct device *dev, u64 mask)
+static int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -721,7 +721,6 @@ int dma_supported(struct device *dev, u64 mask)
 		return 1;
 	return ops->dma_supported(dev, mask);
 }
-EXPORT_SYMBOL(dma_supported);
 
 bool dma_pci_p2pdma_supported(struct device *dev)
 {
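With the declaration and export gone, dma_supported() is an implementation detail of the mapping core; drivers negotiate addressing capability through dma_set_mask() and friends, which perform the dma_supported() check internally. A minimal sketch of the driver-side pattern (mydrv_setup_dma is a hypothetical name, not from this series):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /*
     * Hypothetical driver fragment: probe the widest usable DMA mask via
     * dma_set_mask_and_coherent(), which calls dma_supported() internally.
     */
    static int mydrv_setup_dma(struct device *dev)
    {
        /* Prefer 64-bit addressing, fall back to 32-bit. */
        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
            return 0;
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    }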
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -326,9 +326,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
-	if (nslabs < IO_TLB_MIN_SLABS)
-		panic("%s: nslabs = %lu too small\n", __func__, nslabs);
-
 	/*
 	 * By default allocate the bounce buffer memory from low memory, but
 	 * allow to pick a location everywhere for hypervisors with guest
@@ -341,8 +338,7 @@ retry:
 	else
 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 	if (!tlb) {
-		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
-			__func__, bytes);
+		pr_warn("%s: failed to allocate tlb structure\n", __func__);
 		return;
 	}
 
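With the panic reverted, an undersized or failed swiotlb allocation once again degrades gracefully: the init path warns and returns, and only mappings that actually need bouncing fail later. A hedged sketch of the runtime check a caller could make (this assumes the is_swiotlb_active() helper from <linux/swiotlb.h>; mydrv_can_bounce is a hypothetical name):

    #include <linux/device.h>
    #include <linux/swiotlb.h>

    /*
     * Hypothetical fragment: rather than relying on boot panicking when the
     * bounce buffer is too small, test at runtime whether a swiotlb pool is
     * usable for this device (assumes is_swiotlb_active() as in this kernel
     * generation).
     */
    static bool mydrv_can_bounce(struct device *dev)
    {
        return is_swiotlb_active(dev);
    }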
@@ -579,7 +575,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 	}
 }
 
-#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))
+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
+{
+	return start + (idx << IO_TLB_SHIFT);
+}
 
 /*
  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
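The point of replacing the macro with an inline function: in the macro, (idx) << IO_TLB_SHIFT is evaluated in the (32-bit) type of the caller's index, so a large enough slot index wraps before the 64-bit addition; making the parameter phys_addr_t forces the widening to happen before the shift. A standalone demonstration (userspace C, not kernel code; IO_TLB_SHIFT of 11 matches the kernel's 2 KB slots):

    #include <stdint.h>
    #include <stdio.h>

    #define IO_TLB_SHIFT 11
    typedef uint64_t phys_addr_t;

    /* Old form: the shift happens in the 32-bit type of idx. */
    #define slot_addr_old(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))

    /* New form: idx is widened to 64 bits before the shift. */
    static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
    {
        return start + (idx << IO_TLB_SHIFT);
    }

    int main(void)
    {
        phys_addr_t start = 0x100000000ULL;
        unsigned int idx = 0x300000;  /* only plausible for a huge swiotlb */

        /* 0x300000 << 11 = 0x180000000, which wraps to 0x80000000 in 32 bits. */
        printf("macro:  %#llx\n", (unsigned long long)slot_addr_old(start, idx));
        printf("inline: %#llx\n", (unsigned long long)slot_addr(start, idx));
        return 0;
    }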
@@ -765,7 +764,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	/*
 	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
 	 * to the tlb buffer, if we knew for sure the device will
-	 * overwirte the entire current content. But we don't. Thus
+	 * overwrite the entire current content. But we don't. Thus
 	 * unconditional bounce may prevent leaking swiotlb content (i.e.
 	 * kernel memory) to user-space.
 	 */