#ifndef _DMA_REMAPPING_H
#define _DMA_REMAPPING_H
/*
* VT-d hardware uses 4KiB page size regardless of host page size.
*/
#define VTD_PAGE_SHIFT (12)
#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
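/*
 * VTD_PAGE_ALIGN() rounds an address up to the next 4KiB boundary:
 * e.g. VTD_PAGE_ALIGN(0x1001) == 0x2000, while an already-aligned
 * address is returned unchanged.
 */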
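/*
 * Each page-table level decodes VTD_STRIDE_SHIFT (9) address bits:
 * a 4KiB table holds 512 eight-byte entries, so a leaf entry at
 * level 2 maps a 2MiB superpage and at level 3 a 1GiB superpage.
 */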
#define VTD_STRIDE_SHIFT (9)
#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
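/*
 * Bits in the DMA (second-level) page-table entries: bit 0 permits
 * reads, bit 1 permits writes, bit 7 marks a large-page (superpage)
 * leaf, and bit 11 forces DMA accesses to snoop processor caches.
 */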
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_LARGE_PAGE (1 << 7)
#define DMA_PTE_SNP (1 << 11)
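/*
 * Translation types for the context entry: translate requests through
 * the multi-level page table, additionally honour Device-IOTLB (ATS)
 * requests, or pass DMA through untranslated.
 */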
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
struct intel_iommu;
struct dmar_domain;
struct root_entry;
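/*
 * When CONFIG_INTEL_IOMMU is not set, provide stubs so that callers
 * build without needing their own #ifdefs.
 */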
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return 0;
}
#define dmar_disabled (1)
#define intel_iommu_enabled (0)
#endif
#endif