VT-d: Changes to support KVM

This patch extends the VT-d driver to support KVM: the Intel IOMMU headers move under include/linux, and helpers for domain allocation, context and page mapping, device detach, and domain teardown are exported for KVM's use.

[Ben: fixed memory pinning]
[avi: move dma_remapping.h as well]

Signed-off-by: Kay, Allen M <allen.m.kay@intel.com>
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Ben-Ami Yassour <benami@il.ibm.com>
Signed-off-by: Amit Shah <amit.shah@qumranet.com>
Acked-by: Mark Gross <mgross@linux.intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Kay, Allen M, 2008-09-09 18:37:29 +03:00; committed by Avi Kivity
commit 3871794642, parent aa3a816b6d
8 changed files with 139 additions and 11 deletions


@@ -28,9 +28,9 @@
#include <linux/pci.h>
#include <linux/dmar.h>
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
#include <linux/timer.h>
-#include "iova.h"
-#include "intel-iommu.h"

#undef PREFIX
#define PREFIX "DMAR:"


@@ -33,8 +33,8 @@
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
-#include "iova.h"
-#include "intel-iommu.h"
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
#include <asm/proto.h> /* force_iommu in this header in x86-64*/
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -156,7 +156,7 @@ static inline void *alloc_domain_mem(void)
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

-static inline void free_domain_mem(void *vaddr)
+static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}
@@ -1341,7 +1341,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu stores the info
 */
-struct dmar_domain *
+static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;
@@ -2318,3 +2318,111 @@ int __init intel_iommu_init(void)
	return 0;
}
+
+void intel_iommu_domain_exit(struct dmar_domain *domain)
+{
+	u64 end;
+
+	/* Domain 0 is reserved, so dont process it */
+	if (!domain)
+		return;
+
+	end = DOMAIN_MAX_ADDR(domain->gaw);
+	end = end & (~PAGE_MASK_4K);
+
+	/* clear ptes */
+	dma_pte_clear_range(domain, 0, end);
+
+	/* free page tables */
+	dma_pte_free_pagetable(domain, 0, end);
+
+	iommu_free_domain(domain);
+	free_domain_mem(domain);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
+
+struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
+{
+	struct dmar_drhd_unit *drhd;
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+
+	drhd = dmar_find_matched_drhd_unit(pdev);
+	if (!drhd) {
+		printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
+		return NULL;
+	}
+
+	iommu = drhd->iommu;
+	if (!iommu) {
+		printk(KERN_ERR
+			"intel_iommu_domain_alloc: iommu == NULL\n");
+		return NULL;
+	}
+
+	domain = iommu_alloc_domain(iommu);
+	if (!domain) {
+		printk(KERN_ERR
+			"intel_iommu_domain_alloc: domain == NULL\n");
+		return NULL;
+	}
+
+	if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+		printk(KERN_ERR
+			"intel_iommu_domain_alloc: domain_init() failed\n");
+		intel_iommu_domain_exit(domain);
+		return NULL;
+	}
+
+	return domain;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
+
+int intel_iommu_context_mapping(
+	struct dmar_domain *domain, struct pci_dev *pdev)
+{
+	int rc;
+	rc = domain_context_mapping(domain, pdev);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
+
+int intel_iommu_page_mapping(
+	struct dmar_domain *domain, dma_addr_t iova,
+	u64 hpa, size_t size, int prot)
+{
+	int rc;
+	rc = domain_page_mapping(domain, iova, hpa, size, prot);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
+
+void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+{
+	detach_domain_for_dev(domain, bus, devfn);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
+
+struct dmar_domain *
+intel_iommu_find_domain(struct pci_dev *pdev)
+{
+	return find_domain(pdev);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
+
+int intel_iommu_found(void)
+{
+	return g_num_of_iommus;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_found);
+
+u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
+{
+	struct dma_pte *pte;
+	u64 pfn;
+
+	pfn = 0;
+	pte = addr_to_dma_pte(domain, iova);
+
+	if (pte)
+		pfn = dma_pte_addr(*pte);
+
+	return pfn >> PAGE_SHIFT_4K;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
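Taken together, the exports above give an external user such as KVM what it needs to put an assigned device behind its own protection domain. The following is a minimal sketch of that setup path, assuming the host memory behind hpa has already been pinned; the intel_iommu_* calls are the ones exported above and DMA_PTE_READ/DMA_PTE_WRITE are the existing prot flags from dma_remapping.h, while the surrounding function, its parameters, and its error-handling policy are illustrative only.

/* Illustrative caller only -- not part of this patch. Shows how the
 * exported API could put an assigned device behind a VT-d protection
 * domain; the backing host memory is assumed to be pinned already.
 */
#include <linux/pci.h>
#include <linux/intel-iommu.h>

static struct dmar_domain *example_assign_device(struct pci_dev *pdev,
						 dma_addr_t iova, u64 hpa,
						 size_t size)
{
	struct dmar_domain *domain;
	int rc;

	if (!intel_iommu_found())
		return NULL;			/* no DMAR units present */

	/* One protection domain for this device, on the IOMMU covering it. */
	domain = intel_iommu_domain_alloc(pdev);
	if (!domain)
		return NULL;

	/* Install the IOVA -> host-physical translations first... */
	rc = intel_iommu_page_mapping(domain, iova, hpa, size,
				      DMA_PTE_READ | DMA_PTE_WRITE);
	if (rc)
		goto fail;

	/* ...then point the device's context entry at the domain. */
	rc = intel_iommu_context_mapping(domain, pdev);
	if (rc)
		goto fail;

	return domain;

fail:
	intel_iommu_domain_exit(domain);
	return NULL;
}

In this sketch the mappings are installed before the device's context entry is pointed at the domain, so the device never translates through a half-built page table; the matching teardown path is sketched after the header changes below.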


@@ -4,7 +4,7 @@
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
-#include "intel-iommu.h"
+#include <linux/intel-iommu.h>
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];


@@ -1,4 +1,4 @@
-#include "intel-iommu.h"
+#include <linux/intel-iommu.h>

struct ioapic_scope {
	struct intel_iommu *iommu;


@@ -7,7 +7,7 @@
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

-#include "iova.h"
+#include <linux/iova.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)


@@ -25,10 +25,10 @@
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/sysdev.h>
-#include "iova.h"
+#include <linux/iova.h>
#include <linux/io.h>
+#include <linux/dma_remapping.h>
#include <asm/cacheflush.h>
-#include "dma_remapping.h"

/*
 * Intel IOMMU register specification per version 1.0 public spec.
@@ -304,4 +304,24 @@ extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+void intel_iommu_domain_exit(struct dmar_domain *domain);
+struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev);
+int intel_iommu_context_mapping(struct dmar_domain *domain,
+				struct pci_dev *pdev);
+int intel_iommu_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
+			     u64 hpa, size_t size, int prot);
+void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn);
+struct dmar_domain *intel_iommu_find_domain(struct pci_dev *pdev);
+u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova);
+
+#ifdef CONFIG_DMAR
+int intel_iommu_found(void);
+#else /* CONFIG_DMAR */
+static inline int intel_iommu_found(void)
+{
+	return 0;
+}
+#endif /* CONFIG_DMAR */
#endif
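
On the release side, the same header exposes what a caller needs to undo an assignment. A hedged sketch of that path follows; the loop, the put_page() policy, and the helper name are assumptions of this example rather than anything this patch defines. The patch itself only guarantees that intel_iommu_iova_to_pfn() returns the mapped host pfn (or 0) and that intel_iommu_domain_exit() clears the ptes and frees the page tables and the domain.

/* Illustrative teardown only -- not part of this patch. Releases the
 * host pages pinned for an assigned range and destroys the domain.
 * PAGE_SHIFT_4K comes from the VT-d headers used above; put_page() on
 * the looked-up pfn assumes every IOVA here maps ordinary pinned RAM.
 */
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/intel-iommu.h>

static void example_unassign_device(struct dmar_domain *domain,
				    struct pci_dev *pdev,
				    u64 iova, unsigned long npages)
{
	unsigned long i;
	u64 pfn;

	/* Stop translating for this device before tearing the domain down. */
	intel_iommu_detach_dev(domain, pdev->bus->number, pdev->devfn);

	/* Drop the references taken when the range was mapped and pinned. */
	for (i = 0; i < npages; i++) {
		pfn = intel_iommu_iova_to_pfn(domain,
					      iova + ((u64)i << PAGE_SHIFT_4K));
		if (pfn)
			put_page(pfn_to_page((unsigned long)pfn));
	}

	/* Clears the ptes, frees the page tables, then frees the domain. */
	intel_iommu_domain_exit(domain);
}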