drm/vmwgfx: remove code that was using physical page addresses
This code has been unused for a while now. When the explicit checks for whether the driver is running on top of a non-coherent swiotlb were deprecated, we lost the ability to fall back to physical mappings. Instead of trying to re-add a module parameter to force usage of physical addresses, it's better to just force coherent TTM pages via the force_coherent module parameter, making this code pointless. Signed-off-by: Zack Rusin <zackr@vmware.com> Reviewed-by: Martin Krastev <krastevm@vmware.com> Link: https://patchwork.freedesktop.org/patch/msgid/20210609172307.131929-6-zackr@vmware.com
This commit is contained in:
parent
74231041d1
commit
f674a218c6
|
@ -272,7 +272,6 @@ static const struct pci_device_id vmw_pci_id_list[] = {
|
||||||
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
|
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
|
||||||
|
|
||||||
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
|
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
|
||||||
static int vmw_force_iommu;
|
|
||||||
static int vmw_restrict_iommu;
|
static int vmw_restrict_iommu;
|
||||||
static int vmw_force_coherent;
|
static int vmw_force_coherent;
|
||||||
static int vmw_restrict_dma_mask;
|
static int vmw_restrict_dma_mask;
|
||||||
|
@ -284,8 +283,6 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
|
||||||
|
|
||||||
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
|
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
|
||||||
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
|
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
|
||||||
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
|
|
||||||
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
|
|
||||||
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
|
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
|
||||||
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
|
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
|
||||||
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
|
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
|
||||||
|
@ -645,7 +642,6 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
|
||||||
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
|
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
|
||||||
{
|
{
|
||||||
static const char *names[vmw_dma_map_max] = {
|
static const char *names[vmw_dma_map_max] = {
|
||||||
[vmw_dma_phys] = "Using physical TTM page addresses.",
|
|
||||||
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
|
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
|
||||||
[vmw_dma_map_populate] = "Caching DMA mappings.",
|
[vmw_dma_map_populate] = "Caching DMA mappings.",
|
||||||
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
|
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
|
||||||
|
@ -679,8 +675,7 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
|
ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
|
||||||
if (dev_priv->map_mode != vmw_dma_phys &&
|
if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) {
|
||||||
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
|
|
||||||
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
|
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
|
||||||
return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
|
return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
|
||||||
}
|
}
|
||||||
|
|
|
@ -314,7 +314,6 @@ struct vmw_res_cache_entry {
|
||||||
* enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
|
* enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
|
||||||
*/
|
*/
|
||||||
enum vmw_dma_map_mode {
|
enum vmw_dma_map_mode {
|
||||||
vmw_dma_phys, /* Use physical page addresses */
|
|
||||||
vmw_dma_alloc_coherent, /* Use TTM coherent pages */
|
vmw_dma_alloc_coherent, /* Use TTM coherent pages */
|
||||||
vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
|
vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
|
||||||
vmw_dma_map_bind, /* Unmap from DMA just before unbind */
|
vmw_dma_map_bind, /* Unmap from DMA just before unbind */
|
||||||
|
|
|
@ -237,21 +237,6 @@ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
|
||||||
return viter->pages[viter->i];
|
return viter->pages[viter->i];
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* __vmw_piter_phys_addr: Helper functions to return the DMA
|
|
||||||
* address of the current page.
|
|
||||||
*
|
|
||||||
* @viter: Pointer to the iterator
|
|
||||||
*
|
|
||||||
* These functions return the DMA address of the page currently
|
|
||||||
* pointed to by @viter. Functions are selected depending on the
|
|
||||||
* current mapping mode.
|
|
||||||
*/
|
|
||||||
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
|
|
||||||
{
|
|
||||||
return page_to_phys(viter->pages[viter->i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
|
static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
|
||||||
{
|
{
|
||||||
return viter->addrs[viter->i];
|
return viter->addrs[viter->i];
|
||||||
|
@ -282,10 +267,6 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
|
||||||
viter->page = &__vmw_piter_non_sg_page;
|
viter->page = &__vmw_piter_non_sg_page;
|
||||||
viter->pages = vsgt->pages;
|
viter->pages = vsgt->pages;
|
||||||
switch (vsgt->mode) {
|
switch (vsgt->mode) {
|
||||||
case vmw_dma_phys:
|
|
||||||
viter->next = &__vmw_piter_non_sg_next;
|
|
||||||
viter->dma_address = &__vmw_piter_phys_addr;
|
|
||||||
break;
|
|
||||||
case vmw_dma_alloc_coherent:
|
case vmw_dma_alloc_coherent:
|
||||||
viter->next = &__vmw_piter_non_sg_next;
|
viter->next = &__vmw_piter_non_sg_next;
|
||||||
viter->dma_address = &__vmw_piter_dma_addr;
|
viter->dma_address = &__vmw_piter_dma_addr;
|
||||||
|
|
Loading…
Reference in New Issue