dma-direct: Force unencrypted DMA under SME for certain DMA masks
If a device doesn't support DMA to a physical address that includes the
encryption bit (currently bit 47, so 48-bit DMA), then the DMA must
occur to unencrypted memory. SWIOTLB is used to satisfy that requirement
if an IOMMU is not active (enabled or configured in passthrough mode).

However, commit fafadcd165 ("swiotlb: don't dip into swiotlb pool for
coherent allocations") modified the coherent allocation support in
SWIOTLB to use the DMA direct coherent allocation support. When an IOMMU
is not active, this resulted in dma_alloc_coherent() failing for devices
that didn't support DMA addresses that included the encryption bit.

Addressing this requires changes to the force_dma_unencrypted() function
in kernel/dma/direct.c. Since the function is now non-trivial and
SME/SEV specific, update the DMA direct support to add an arch override
for the force_dma_unencrypted() function. The arch override is selected
when CONFIG_AMD_MEM_ENCRYPT is set. The arch override function resides
in the arch/x86/mm/mem_encrypt.c file and forces unencrypted DMA when
either SEV is active or SME is active and the device does not support
DMA to physical addresses that include the encryption bit.

Fixes: fafadcd165 ("swiotlb: don't dip into swiotlb pool for coherent allocations")
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
[hch: moved the force_dma_unencrypted declaration to dma-mapping.h,
      fold the s390 fix from Halil Pasic]
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 9087c37584
parent 9637d51734
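As a quick illustration of the mask arithmetic described above, here is a
standalone userspace C sketch (not kernel code; DMA_BIT_MASK() and the bit
scan are local stand-ins, and the bit-47 C-bit position is the one the
message cites):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's DMA_BIT_MASK() macro. */
    #define DMA_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
        uint64_t sme_me_mask = 1ULL << 47;  /* encryption (C) bit position */
        /* __ffs64() equivalent: index of the lowest set bit */
        uint64_t dma_enc_mask = DMA_BIT_MASK(__builtin_ctzll(sme_me_mask));

        /* A 48-bit mask covers bit 47, so encrypted DMA works (prints 0). */
        printf("%d\n", DMA_BIT_MASK(48) <= dma_enc_mask);
        /* A 32-bit mask can never set bit 47: force unencrypted (prints 1). */
        printf("%d\n", DMA_BIT_MASK(32) <= dma_enc_mask);
        return 0;
    }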
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -189,6 +189,7 @@ config S390
 	select VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_SCALED_CPUTIME
 	select HAVE_NMI
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	select SWIOTLB
 	select GENERIC_ALLOCATOR
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -30,7 +30,7 @@
 #include <linux/export.h>
 #include <linux/cma.h>
 #include <linux/gfp.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -161,6 +161,11 @@ bool sev_active(void)
 	return is_prot_virt_guest();
 }
 
+bool force_dma_unencrypted(struct device *dev)
+{
+	return sev_active();
+}
+
 /* protected virtualization */
 static void pv_init(void)
 {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1528,6 +1528,7 @@ config AMD_MEM_ENCRYPT
 	depends on X86_64 && CPU_SUP_AMD
 	select DYNAMIC_PHYSICAL_MASK
 	select ARCH_USE_MEMREMAP_PROT
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	---help---
 	  Say yes to enable support for the encryption of system memory.
 	  This requires an AMD processor that supports Secure Memory
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -15,6 +15,10 @@
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -348,6 +352,32 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);
 
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+	/*
+	 * For SEV, all DMA must be to unencrypted addresses.
+	 */
+	if (sev_active())
+		return true;
+
+	/*
+	 * For SME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the
+	 * encryption mask.
+	 */
+	if (sme_active()) {
+		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+						dev->bus_dma_mask);
+
+		if (dma_dev_mask <= dma_enc_mask)
+			return true;
+	}
+
+	return false;
+}
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_free_decrypted_mem(void)
 {
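One detail worth noting in the SME branch above: dev->bus_dma_mask is zero
when the bus imposes no extra addressing limit, which is why the device
limit is computed with min_not_zero() rather than a plain min(). A
userspace model of that helper's semantics (the body is a local sketch,
not the kernel macro):

    #include <stdint.h>

    /* Sketch of min_not_zero(): the smaller of the two values, with a
     * zero argument treated as "unset" and ignored. */
    static uint64_t min_not_zero(uint64_t a, uint64_t b)
    {
        if (!a)
            return b;
        if (!b)
            return a;
        return a < b ? a : b;
    }

So a device with a 32-bit coherent mask and no bus mask is compared as
0xFFFFFFFF, not as 0.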
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+
 /*
  * If memory encryption is supported, phys_to_dma will set the memory encryption
  * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -48,6 +48,9 @@ config ARCH_HAS_DMA_COHERENT_TO_PFN
 config ARCH_HAS_DMA_MMAP_PGPROT
 	bool
 
+config ARCH_HAS_FORCE_DMA_UNENCRYPTED
+	bool
+
 config DMA_NONCOHERENT_CACHE_SYNC
 	bool
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -23,14 +23,6 @@
 #define ARCH_ZONE_DMA_BITS 24
 #endif
 
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
-	return sev_active();
-}
-
 static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
 	if (!dev->dma_mask) {
@@ -46,7 +38,7 @@ static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 		phys_addr_t phys)
 {
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		return __phys_to_dma(dev, phys);
 	return phys_to_dma(dev, phys);
 }
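The __phys_to_dma()/phys_to_dma() split in the hunk above is the crux of
the check: as the dma-direct.h comment quoted earlier notes, phys_to_dma()
sets the memory encryption bit in the DMA address, while __phys_to_dma()
is the raw translation. Roughly (a simplified sketch of the era's header,
not a verbatim quote):

    /* Simplified model: the plain translation tags the C-bit under SME,
     * the raw __ variant does not. */
    static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
    {
        return __sme_set(__phys_to_dma(dev, paddr));
    }

Forcing unencrypted DMA therefore means handing the device an address
without the C-bit, matching the mapping decrypted via set_memory_decrypted().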
@@ -67,7 +59,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
 		dma_mask = dev->bus_dma_mask;
 
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else
 		*phys_mask = dma_to_phys(dev, dma_mask);
@@ -159,7 +151,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted()) {
+	if (force_dma_unencrypted(dev)) {
 		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
 		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
 	} else {
@@ -192,7 +184,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
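End to end, the effect is that a coherent allocation for a mask-limited
device under SME now comes back decrypted, with a C-bit-free handle,
instead of failing. A hypothetical driver-side snippet (illustrative only;
the helper name and the single-page buffer are made up, the
dma_alloc_coherent()/dma_free_coherent() calls are the standard API):

    #include <linux/dma-mapping.h>

    /* Hypothetical example: allocate and free one coherent page. */
    static int example_coherent_alloc(struct device *dev)
    {
        dma_addr_t dma_handle;
        void *buf;

        buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        /* ... program the device with dma_handle and do I/O ... */

        dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);
        return 0;
    }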