Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Just back from LCA + some days off, had some fixes from the past 2 weeks,

  Some amdkfd code removal for a feature that wasn't ready, otherwise
  just one fix for core helper sleeping, exynos, i915, and radeon fixes.

  I thought I had some sti fixes but they were already in, and it
  confused me for a few mins this morning"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm: fb helper should avoid sleeping in panic context
  drm/exynos: fix warning of vblank reference count
  drm/exynos: remove unnecessary runtime pm operations
  drm/exynos: fix reset codes for memory mapped hdmi phy
  drm/radeon: use rv515_ring_start on r5xx
  drm/radeon: add si dpm quirk list
  drm/radeon: don't print error on -ERESTARTSYS
  drm/i915: Fix mutex->owner inspection race under DEBUG_MUTEXES
  drm/i915: Ban Haswell from using RCS flips
  drm/i915: vlv: sanitize RPS interrupt mask during GPU idling
  drm/i915: fix HW lockup due to missing RPS IRQ workaround on GEN6
  drm/i915: gen9: fix RPS interrupt routing to CPU vs. GT
  drm/exynos: remove the redundant machine checking code
  drm/radeon: add a dpm quirk list
  drm/amdkfd: Fix sparse warning (different address space)
  drm/radeon: fix VM flush on CIK (v3)
  drm/radeon: fix VM flush on SI (v3)
  drm/radeon: fix VM flush on cayman/aruba (v3)
  drm/amdkfd: Drop interrupt SW ring buffer
Linus Torvalds 2015-01-21 20:23:33 +12:00
commit 479459a86c
26 changed files with 227 additions and 256 deletions

drivers/gpu/drm/amd/amdkfd/Makefile

@@ -8,7 +8,6 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 		kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
 		kfd_process.o kfd_queue.o kfd_mqd_manager.o \
 		kfd_kernel_queue.o kfd_packet_manager.o \
-		kfd_process_queue_manager.o kfd_device_queue_manager.o \
-		kfd_interrupt.o
+		kfd_process_queue_manager.o kfd_device_queue_manager.o
 
 obj-$(CONFIG_HSA_AMD)	+= amdkfd.o

drivers/gpu/drm/amd/amdkfd/kfd_device.c

@@ -192,13 +192,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		goto kfd_topology_add_device_error;
 	}
 
-	if (kfd_interrupt_init(kfd)) {
-		dev_err(kfd_device,
-			"Error initializing interrupts for device (%x:%x)\n",
-			kfd->pdev->vendor, kfd->pdev->device);
-		goto kfd_interrupt_error;
-	}
-
 	if (!device_iommu_pasid_init(kfd)) {
 		dev_err(kfd_device,
 			"Error initializing iommuv2 for device (%x:%x)\n",
@@ -237,8 +230,6 @@ dqm_start_error:
 device_queue_manager_error:
 	amd_iommu_free_device(kfd->pdev);
 device_iommu_pasid_error:
-	kfd_interrupt_exit(kfd);
-kfd_interrupt_error:
 	kfd_topology_remove_device(kfd);
 kfd_topology_add_device_error:
 	kfd2kgd->fini_sa_manager(kfd->kgd);
@@ -254,7 +245,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
 	if (kfd->init_complete) {
 		device_queue_manager_uninit(kfd->dqm);
 		amd_iommu_free_device(kfd->pdev);
-		kfd_interrupt_exit(kfd);
 		kfd_topology_remove_device(kfd);
 	}
 
@@ -296,13 +286,5 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
 /* This is called directly from KGD at ISR. */
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
-	if (kfd->init_complete) {
-		spin_lock(&kfd->interrupt_lock);
-		if (kfd->interrupts_active
-		    && enqueue_ih_ring_entry(kfd, ih_ring_entry))
-			schedule_work(&kfd->interrupt_work);
-		spin_unlock(&kfd->interrupt_lock);
-	}
+	/* Process interrupts / schedule work as necessary */
 }

drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c

@@ -280,7 +280,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 			q->queue);
 
 	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
-			q->queue, q->properties.write_ptr);
+			q->queue, (uint32_t __user *) q->properties.write_ptr);
 	if (retval != 0) {
 		deallocate_hqd(dqm, q);
 		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c (deleted)

@@ -1,176 +0,0 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* KFD Interrupts.
*
* AMD GPUs deliver interrupts by pushing an interrupt description onto the
* interrupt ring and then sending an interrupt. KGD receives the interrupt
* in ISR and sends us a pointer to each new entry on the interrupt ring.
*
* We generally can't process interrupt-signaled events from ISR, so we call
* out to each interrupt client module (currently only the scheduler) to ask if
* each interrupt is interesting. If they return true, then it requires further
* processing so we copy it to an internal interrupt ring and call each
* interrupt client again from a work-queue.
*
* There's no acknowledgment for the interrupts we use. The hardware simply
* queues a new interrupt each time without waiting.
*
* The fixed-size internal queue means that it's possible for us to lose
* interrupts because we have no back-pressure to the hardware.
*/
#include <linux/slab.h>
#include <linux/device.h>
#include "kfd_priv.h"
#define KFD_INTERRUPT_RING_SIZE 256
static void interrupt_wq(struct work_struct *);
int kfd_interrupt_init(struct kfd_dev *kfd)
{
void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
kfd->device_info->ih_ring_entry_size,
GFP_KERNEL);
if (!interrupt_ring)
return -ENOMEM;
kfd->interrupt_ring = interrupt_ring;
kfd->interrupt_ring_size =
KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
atomic_set(&kfd->interrupt_ring_wptr, 0);
atomic_set(&kfd->interrupt_ring_rptr, 0);
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
kfd->interrupts_active = true;
/*
* After this function returns, the interrupt will be enabled. This
* barrier ensures that the interrupt running on a different processor
* sees all the above writes.
*/
smp_wmb();
return 0;
}
void kfd_interrupt_exit(struct kfd_dev *kfd)
{
/*
* Stop the interrupt handler from writing to the ring and scheduling
* workqueue items. The spinlock ensures that any interrupt running
* after we have unlocked sees interrupts_active = false.
*/
unsigned long flags;
spin_lock_irqsave(&kfd->interrupt_lock, flags);
kfd->interrupts_active = false;
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
/*
* Flush_scheduled_work ensures that there are no outstanding
* work-queue items that will access interrupt_ring. New work items
* can't be created because we stopped interrupt handling above.
*/
flush_scheduled_work();
kfree(kfd->interrupt_ring);
}
/*
* This assumes that it can't be called concurrently with itself
* but only with dequeue_ih_ring_entry.
*/
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
{
unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
if ((rptr - wptr) % kfd->interrupt_ring_size ==
kfd->device_info->ih_ring_entry_size) {
/* This is very bad, the system is likely to hang. */
dev_err_ratelimited(kfd_chardev(),
"Interrupt ring overflow, dropping interrupt.\n");
return false;
}
memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
kfd->device_info->ih_ring_entry_size);
wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
kfd->interrupt_ring_size;
smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
atomic_set(&kfd->interrupt_ring_wptr, wptr);
return true;
}
/*
* This assumes that it can't be called concurrently with itself
* but only with enqueue_ih_ring_entry.
*/
static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
{
/*
* Assume that wait queues have an implicit barrier, i.e. anything that
* happened in the ISR before it queued work is visible.
*/
unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
if (rptr == wptr)
return false;
memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
kfd->device_info->ih_ring_entry_size);
rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
kfd->interrupt_ring_size;
/*
* Ensure the rptr write update is not visible until
* memcpy has finished reading.
*/
smp_mb();
atomic_set(&kfd->interrupt_ring_rptr, rptr);
return true;
}
static void interrupt_wq(struct work_struct *work)
{
struct kfd_dev *dev = container_of(work, struct kfd_dev,
interrupt_work);
uint32_t ih_ring_entry[DIV_ROUND_UP(
dev->device_info->ih_ring_entry_size,
sizeof(uint32_t))];
while (dequeue_ih_ring_entry(dev, ih_ring_entry))
;
}

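The removed kfd_interrupt.c above is a single-producer/single-consumer byte ring indexed by atomic read/write pointers; its "full" check, (rptr - wptr) % ring_size == entry_size, deliberately leaves one entry of slack so that rptr == wptr can unambiguously mean "empty". A minimal user-space sketch of that convention follows; the names, the fixed 16-byte entry size, and the power-of-two ring size are illustrative assumptions, not the driver's code, and the kernel's smp_wmb()/smp_mb() ordering is only noted in comments.

    #include <stdbool.h>
    #include <string.h>

    #define ENTRY_SIZE 16                    /* stand-in for ih_ring_entry_size */
    #define RING_SIZE  (16 * ENTRY_SIZE)     /* must be a power of two so the
                                              * unsigned wraparound below stays exact */

    static unsigned char ring[RING_SIZE];
    static unsigned int rptr, wptr;          /* byte offsets, always < RING_SIZE */

    /* Producer: refuse to write into the last free slot, so rptr == wptr
     * is reserved to mean "ring empty" rather than "ring full". */
    static bool ring_enqueue(const void *entry)
    {
        if ((rptr - wptr) % RING_SIZE == ENTRY_SIZE)
            return false;                    /* full: next write would reach rptr */
        memcpy(&ring[wptr], entry, ENTRY_SIZE);
        wptr = (wptr + ENTRY_SIZE) % RING_SIZE;  /* kernel adds smp_wmb() before this */
        return true;
    }

    /* Consumer: empty exactly when the pointers meet. */
    static bool ring_dequeue(void *entry)
    {
        if (rptr == wptr)
            return false;
        memcpy(entry, &ring[rptr], ENTRY_SIZE);
        rptr = (rptr + ENTRY_SIZE) % RING_SIZE;  /* kernel adds smp_mb() before this */
        return true;
    }

    int main(void)
    {
        unsigned char e[ENTRY_SIZE] = { 1 };
        int stored = 0;

        while (ring_enqueue(e))              /* fills all but one slot (15 entries) */
            stored++;
        while (ring_dequeue(e))
            stored--;
        return stored;                       /* 0: everything queued was drained */
    }
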
drivers/gpu/drm/amd/amdkfd/kfd_priv.h

@ -135,22 +135,10 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources; struct kgd2kfd_shared_resources shared_resources;
void *interrupt_ring;
size_t interrupt_ring_size;
atomic_t interrupt_ring_rptr;
atomic_t interrupt_ring_wptr;
struct work_struct interrupt_work;
spinlock_t interrupt_lock;
/* QCM Device instance */ /* QCM Device instance */
struct device_queue_manager *dqm; struct device_queue_manager *dqm;
bool init_complete; bool init_complete;
/*
* Interrupts of interest to KFD are copied
* from the HW ring into a SW ring.
*/
bool interrupts_active;
}; };
/* KGD2KFD callbacks */ /* KGD2KFD callbacks */
@ -531,10 +519,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx); struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
/* Interrupts */ /* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
/* Power Management */ /* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd); void kgd2kfd_suspend(struct kfd_dev *kfd);

drivers/gpu/drm/drm_fb_helper.c

@@ -741,7 +741,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 	int i, j, rc = 0;
 	int start;
 
-	drm_modeset_lock_all(dev);
+	if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+		return -EBUSY;
+	}
 	if (!drm_fb_helper_is_bound(fb_helper)) {
 		drm_modeset_unlock_all(dev);
 		return -EBUSY;
@@ -915,7 +917,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 	int ret = 0;
 	int i;
 
-	drm_modeset_lock_all(dev);
+	if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+		return -EBUSY;
+	}
 	if (!drm_fb_helper_is_bound(fb_helper)) {
 		drm_modeset_unlock_all(dev);
 		return -EBUSY;

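The fb helper hunks above replace the unconditional drm_modeset_lock_all() with __drm_modeset_lock_all(dev, !!oops_in_progress): when the fbcon path is entered from panic/oops handling it must not sleep, so the helper only trylocks and bails out with -EBUSY instead of blocking. A rough user-space sketch of that "trylock when we must not sleep" pattern is below; the lock wrapper and function names are hypothetical, not the DRM API.

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t modeset_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Take the lock normally, but fall back to a non-blocking attempt when
     * we are in a context that must not sleep (e.g. dumping a panic). */
    static int lock_all(bool trylock)
    {
        if (trylock)
            return pthread_mutex_trylock(&modeset_lock) ? -EBUSY : 0;
        return pthread_mutex_lock(&modeset_lock) ? -EINVAL : 0;
    }

    /* Caller mirrors the shape of drm_fb_helper_setcmap(): give up with
     * -EBUSY instead of sleeping while an oops is in progress. */
    static int set_cmap(bool oops_in_progress)
    {
        if (lock_all(oops_in_progress))
            return -EBUSY;
        /* ... program the palette ... */
        pthread_mutex_unlock(&modeset_lock);
        return 0;
    }
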
drivers/gpu/drm/exynos/exynos_drm_drv.c

@ -645,18 +645,6 @@ static int exynos_drm_init(void)
if (!is_exynos) if (!is_exynos)
return -ENODEV; return -ENODEV;
/*
* Register device object only in case of Exynos SoC.
*
* Below codes resolves temporarily infinite loop issue incurred
* by Exynos drm driver when using multi-platform kernel.
* So these codes will be replaced with more generic way later.
*/
if (!of_machine_is_compatible("samsung,exynos3") &&
!of_machine_is_compatible("samsung,exynos4") &&
!of_machine_is_compatible("samsung,exynos5"))
return -ENODEV;
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0); NULL, 0);
if (IS_ERR(exynos_drm_pdev)) if (IS_ERR(exynos_drm_pdev))

drivers/gpu/drm/exynos/exynos_hdmi.c

@@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
 
 static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 {
-	u8 buffer[2];
 	u32 reg;
 
 	clk_disable_unprepare(hdata->res.sclk_hdmi);
@@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 	clk_prepare_enable(hdata->res.sclk_hdmi);
 
 	/* operation mode */
-	buffer[0] = 0x1f;
-	buffer[1] = 0x00;
-
-	if (hdata->hdmiphy_port)
-		i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+				HDMI_PHY_ENABLE_MODE_SET);
 
 	if (hdata->type == HDMI_TYPE13)
 		reg = HDMI_V13_PHY_RSTOUT;

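The exynos_hdmi hunk above drops the raw i2c_master_send() of a {0x1f, 0x00} buffer in favour of a hdmiphy_reg_writeb() helper, so the same reset path works whether the HDMI PHY sits behind an I2C port or is memory mapped. A simplified sketch of how such a transport-dispatching helper can look is below; the struct, the stubbed i2c_send(), and the word-indexed register access are illustrative assumptions, not the driver's implementation.

    #include <stdint.h>
    #include <stddef.h>

    struct hdmiphy {
        void *i2c_port;              /* non-NULL when the PHY hangs off I2C */
        volatile uint32_t *regs;     /* non-NULL when the PHY is memory mapped */
    };

    /* Stand-in for the kernel's i2c_master_send(); a real implementation
     * would push the two-byte {reg, val} message onto the I2C bus. */
    static int i2c_send(void *port, const uint8_t *buf, size_t len)
    {
        (void)port; (void)buf; (void)len;
        return 0;
    }

    /* Write one PHY register, picking the transport at run time so callers
     * such as the reset path don't care which PHY variant they run on. */
    static int hdmiphy_writeb(struct hdmiphy *phy, uint8_t reg, uint8_t val)
    {
        if (phy->i2c_port) {
            uint8_t buf[2] = { reg, val };
            return i2c_send(phy->i2c_port, buf, sizeof(buf));
        }
        phy->regs[reg] = val;        /* memory-mapped variant: direct write
                                      * (the kernel would use writel()) */
        return 0;
    }
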
drivers/gpu/drm/exynos/exynos_mixer.c

@@ -1026,6 +1026,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
 static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
 {
 	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+	int err;
 
 	mutex_lock(&mixer_ctx->mixer_mutex);
 	if (!mixer_ctx->powered) {
@@ -1034,7 +1035,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
 	}
 	mutex_unlock(&mixer_ctx->mixer_mutex);
 
-	drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+	err = drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+	if (err < 0) {
+		DRM_DEBUG_KMS("failed to acquire vblank counter\n");
+		return;
+	}
 
 	atomic_set(&mixer_ctx->wait_vsync_event, 1);
@@ -1262,8 +1267,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
 		return ret;
 	}
 
-	pm_runtime_enable(dev);
-
 	return 0;
 }
@@ -1272,8 +1275,6 @@ static void mixer_unbind(struct device *dev, struct device *master, void *data)
 	struct mixer_context *ctx = dev_get_drvdata(dev);
 
 	mixer_mgr_remove(&ctx->manager);
-
-	pm_runtime_disable(dev);
 }
 
 static const struct component_ops mixer_component_ops = {

drivers/gpu/drm/i915/i915_gem.c

@@ -5155,7 +5155,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 	if (!mutex_is_locked(mutex))
 		return false;
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
 	return mutex->owner == task;
 #else
 	/* Since UP may be pre-empted, we cannot assume that we own the lock */

drivers/gpu/drm/i915/i915_irq.c

@ -292,6 +292,23 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&dev_priv->irq_lock);
} }
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
/*
* SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*
* TODO: verify if this can be reproduced on VLV,CHV.
*/
if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
if (INTEL_INFO(dev_priv)->gen >= 8)
mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
return mask;
}
void gen6_disable_rps_interrupts(struct drm_device *dev) void gen6_disable_rps_interrupts(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -304,8 +321,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ? I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &

drivers/gpu/drm/i915/intel_display.c

@@ -9815,7 +9815,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
 			/* vlv: DISPLAY_FLIP fails to change tiling */
 			ring = NULL;
-	} else if (IS_IVYBRIDGE(dev)) {
+	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		ring = &dev_priv->ring[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
 		ring = obj->ring;

drivers/gpu/drm/i915/intel_drv.h

@@ -794,6 +794,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_reset_rps_interrupts(struct drm_device *dev);
 void gen6_enable_rps_interrupts(struct drm_device *dev);
 void gen6_disable_rps_interrupts(struct drm_device *dev);
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
 static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)

drivers/gpu/drm/i915/intel_pm.c

@@ -4363,16 +4363,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 		mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
 	mask &= dev_priv->pm_rps_events;
 
-	/* IVB and SNB hard hangs on looping batchbuffer
-	 * if GEN6_PM_UP_EI_EXPIRED is masked.
-	 */
-	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
-		mask |= GEN6_PM_RP_UP_EI_EXPIRED;
-
-	if (IS_GEN8(dev_priv->dev))
-		mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
-	return ~mask;
+	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
 }
 
 /* gen6_set_rps is called to update the frequency request, but should also be
@@ -4441,7 +4432,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 		return;
 
 	/* Mask turbo interrupt so that they will not come in between */
-	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+	I915_WRITE(GEN6_PMINTRMSK,
+		   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 
 	vlv_force_gfx_clock(dev_priv, true);

drivers/gpu/drm/radeon/cik.c

@@ -6033,6 +6033,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 1 << vm_id);
 
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0)));   /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
+
 	/* compute doesn't have PFP */
 	if (usepfp) {
 		/* sync PFP to ME, otherwise we might get invalid PFP reads */

drivers/gpu/drm/radeon/cik_sdma.c

@@ -903,6 +903,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
 void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 		      unsigned vm_id, uint64_t pd_addr)
 {
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	if (vm_id < 8) {
 		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
@@ -943,5 +946,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
 	radeon_ring_write(ring, 1 << vm_id);
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* reference */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }

drivers/gpu/drm/radeon/ni.c

@@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
 	radeon_ring_write(ring, 1 << vm_id);
 
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0)));   /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
+
 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 	radeon_ring_write(ring, 0x0);

drivers/gpu/drm/radeon/ni_dma.c

@@ -463,5 +463,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
 	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
 	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for invalidate to complete */
+	radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
+	radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0); /* value */
 }

drivers/gpu/drm/radeon/nid.h

@@ -1133,6 +1133,23 @@
 #define	PACKET3_MEM_SEMAPHORE				0x39
 #define	PACKET3_MPEG_INDEX				0x3A
 #define	PACKET3_WAIT_REG_MEM				0x3C
+#define		WAIT_REG_MEM_FUNCTION(x)		((x) << 0)
+		/* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#define		WAIT_REG_MEM_MEM_SPACE(x)		((x) << 4)
+		/* 0 - reg
+		 * 1 - mem
+		 */
+#define		WAIT_REG_MEM_ENGINE(x)			((x) << 8)
+		/* 0 - me
+		 * 1 - pfp
+		 */
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_PFP_SYNC_ME				0x42
 #define	PACKET3_SURFACE_SYNC				0x43
@@ -1272,6 +1289,13 @@
 					(1 << 21) |	\
 					(((n) & 0xFFFFF) << 0))
 
+#define DMA_SRBM_POLL_PACKET		((9 << 28) |	\
+					 (1 << 27) |	\
+					 (1 << 26))
+
+#define DMA_SRBM_READ_PACKET		((9 << 28) |	\
+					 (1 << 27))
+
 /* async DMA Packet types */
 #define	DMA_PACKET_WRITE			0x2
 #define	DMA_PACKET_COPY				0x3

drivers/gpu/drm/radeon/radeon_asic.c

@ -333,6 +333,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
.set_wptr = &r100_gfx_set_wptr, .set_wptr = &r100_gfx_set_wptr,
}; };
static struct radeon_asic_ring rv515_gfx_ring = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
};
static struct radeon_asic r300_asic = { static struct radeon_asic r300_asic = {
.init = &r300_init, .init = &r300_init,
.fini = &r300_fini, .fini = &r300_fini,
@ -748,7 +762,7 @@ static struct radeon_asic rv515_asic = {
.set_page = &rv370_pcie_gart_set_page, .set_page = &rv370_pcie_gart_set_page,
}, },
.ring = { .ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
}, },
.irq = { .irq = {
.set = &rs600_irq_set, .set = &rs600_irq_set,
@ -814,7 +828,7 @@ static struct radeon_asic r520_asic = {
.set_page = &rv370_pcie_gart_set_page, .set_page = &rv370_pcie_gart_set_page,
}, },
.ring = { .ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
}, },
.irq = { .irq = {
.set = &rs600_irq_set, .set = &rs600_irq_set,

drivers/gpu/drm/radeon/radeon_gem.c

@@ -576,7 +576,7 @@ error_unreserve:
 error_free:
 	drm_free_large(vm_bos);
 
-	if (r)
+	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }

drivers/gpu/drm/radeon/radeon_pm.c

@@ -1287,8 +1287,39 @@ dpm_failed:
 	return ret;
 }
 
+struct radeon_dpm_quirk {
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+};
+
+/* cards with dpm stability problems */
+static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
+	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
+	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
+	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
+	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
+	{ 0, 0, 0, 0 },
+};
+
 int radeon_pm_init(struct radeon_device *rdev)
 {
+	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
+	bool disable_dpm = false;
+
+	/* Apply dpm quirks */
+	while (p && p->chip_device != 0) {
+		if (rdev->pdev->vendor == p->chip_vendor &&
+		    rdev->pdev->device == p->chip_device &&
+		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    rdev->pdev->subsystem_device == p->subsys_device) {
+			disable_dpm = true;
+			break;
+		}
+		++p;
+	}
+
 	/* enable dpm on rv6xx+ */
 	switch (rdev->family) {
 	case CHIP_RV610:
@@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev)
 			 (!(rdev->flags & RADEON_IS_IGP)) &&
 			 (!rdev->smc_fw))
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if (disable_dpm && (radeon_dpm == -1))
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
 		else if (radeon_dpm == 0)
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
 		else

drivers/gpu/drm/radeon/si.c

@@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 1 << vm_id);
 
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0)));   /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
+
 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 	radeon_ring_write(ring, 0x0);

drivers/gpu/drm/radeon/si_dma.c

@@ -206,6 +206,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
 	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
 	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for invalidate to complete */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
+	radeon_ring_write(ring, 0xff << 16); /* retry */
+	radeon_ring_write(ring, 1 << vm_id); /* mask */
+	radeon_ring_write(ring, 0); /* value */
+	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
 /**

drivers/gpu/drm/radeon/si_dpm.c

@@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
 	return ret;
 }
 
+struct si_dpm_quirk {
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 max_sclk;
+	u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+	{ 0, 0, 0, 0 },
+};
+
 static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 					struct radeon_ps *rps)
 {
@@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 	u32 mclk, sclk;
 	u16 vddc, vddci;
 	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+	u32 max_sclk = 0, max_mclk = 0;
 	int i;
+	struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+	/* Apply dpm quirks */
+	while (p && p->chip_device != 0) {
+		if (rdev->pdev->vendor == p->chip_vendor &&
+		    rdev->pdev->device == p->chip_device &&
+		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    rdev->pdev->subsystem_device == p->subsys_device) {
+			max_sclk = p->max_sclk;
+			max_mclk = p->max_mclk;
+			break;
+		}
+		++p;
+	}
 
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
 	    ni_dpm_vblank_too_short(rdev))
@@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 			if (ps->performance_levels[i].mclk > max_mclk_vddc)
 				ps->performance_levels[i].mclk = max_mclk_vddc;
 		}
+		if (max_mclk) {
+			if (ps->performance_levels[i].mclk > max_mclk)
+				ps->performance_levels[i].mclk = max_mclk;
+		}
+		if (max_sclk) {
+			if (ps->performance_levels[i].sclk > max_sclk)
+				ps->performance_levels[i].sclk = max_sclk;
+		}
 	}
 
 	/* XXX validate the min clocks required for display */

drivers/gpu/drm/radeon/sid.h

@@ -1632,6 +1632,23 @@
 #define	PACKET3_MPEG_INDEX				0x3A
 #define	PACKET3_COPY_DW					0x3B
 #define	PACKET3_WAIT_REG_MEM				0x3C
+#define		WAIT_REG_MEM_FUNCTION(x)		((x) << 0)
+		/* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#define		WAIT_REG_MEM_MEM_SPACE(x)		((x) << 4)
+		/* 0 - reg
+		 * 1 - mem
+		 */
+#define		WAIT_REG_MEM_ENGINE(x)			((x) << 8)
+		/* 0 - me
+		 * 1 - pfp
+		 */
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_COPY_DATA				0x40
 #define	PACKET3_CP_DMA					0x41
@@ -1835,6 +1852,7 @@
 #define	DMA_PACKET_TRAP					0x7
 #define	DMA_PACKET_SRBM_WRITE				0x9
 #define	DMA_PACKET_CONSTANT_FILL			0xd
+#define	DMA_PACKET_POLL_REG_MEM				0xe
 #define	DMA_PACKET_NOP					0xf
 
 #define VCE_STATUS					0x20004