Merge branch 'drm-fixes-5.0' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
A few fixes for 5.0:
- Fix radeon crash on SI with VM passthrough
- Fencing fix for shared buffers
- Fix power hwmon reporting on APUs
- Powerplay fix for APUs

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190201043455.5988-1-alexander.deucher@amd.com
commit 2072ce0363
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
                 effective_mode &= ~S_IWUSR;
 
         if ((adev->flags & AMD_IS_APU) &&
-            (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+            (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+             attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
              attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
              attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
                 return 0;
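On the mechanism used by the hunk above: hwmon_attributes_visible() is an is_visible-style callback, and returning 0 from such a callback hides the given sysfs attribute, so adding power1_average to the APU check stops that file from being created on APUs. Below is a minimal standalone C sketch of the "return 0 to hide" convention, using hypothetical names rather than the kernel's sysfs types.

#include <stdio.h>
#include <string.h>

/* Hypothetical model of an is_visible-style callback; not the sysfs API.
 * Returning 0 hides the attribute, returning a mode exposes it. */
#define MODE_RO 0444

struct attr { const char *name; };

static unsigned int attrs_visible(const struct attr *attr, int is_apu)
{
        if (is_apu && strcmp(attr->name, "power1_average") == 0)
                return 0;               /* hidden: not created on APUs */
        return MODE_RO;                 /* exposed read-only otherwise */
}

int main(void)
{
        struct attr power1_average = { "power1_average" };

        printf("dGPU mode: %o\n", attrs_visible(&power1_average, 0));   /* 444 */
        printf("APU mode:  %o\n", attrs_visible(&power1_average, 1));   /* 0 */
        return 0;
}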
@@ -38,6 +38,7 @@
 #include "amdgpu_gem.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
         return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+        struct dma_fence **fences;
+        unsigned int count;
+        int r;
+
+        if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+                return 0;
+
+        r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+        if (r)
+                return r;
+
+        if (count == 0) {
+                /* Now that was unexpected. */
+        } else if (count == 1) {
+                reservation_object_add_excl_fence(obj, fences[0]);
+                dma_fence_put(fences[0]);
+                kfree(fences);
+        } else {
+                struct dma_fence_array *array;
+
+                array = dma_fence_array_create(count, fences,
+                                               dma_fence_context_alloc(1), 0,
+                                               false);
+                if (!array)
+                        goto err_fences_put;
+
+                reservation_object_add_excl_fence(obj, &array->base);
+                dma_fence_put(&array->base);
+        }
+
+        return 0;
+
+err_fences_put:
+        while (count--)
+                dma_fence_put(fences[count]);
+        kfree(fences);
+        return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 
         if (attach->dev->driver != adev->dev->driver) {
                 /*
-                 * Wait for all shared fences to complete before we switch to future
-                 * use of exclusive fence on this prime shared bo.
+                 * We only create shared fences for internal use, but importers
+                 * of the dmabuf rely on exclusive fences for implicitly
+                 * tracking write hazards. As any of the current fences may
+                 * correspond to a write, we need to convert all existing
+                 * fences on the reservation object into a single exclusive
+                 * fence.
                  */
-                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-                                                        true, false,
-                                                        MAX_SCHEDULE_TIMEOUT);
-                if (unlikely(r < 0)) {
-                        DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+                r = __reservation_object_make_exclusive(bo->tbo.resv);
+                if (r)
                         goto error_unreserve;
-                }
         }
 
         /* pin buffer into GTT */
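Aside on the comment in the hunk above: importers of a dmabuf only look at the exclusive fence for implicit synchronization, so any shared fences (which may include pending writes) have to be folded into a single fence that completes only when all of them have completed. With its last argument (signal_on_any) set to false, dma_fence_array_create() provides exactly that "all of" semantics. Below is a toy, standalone C model of it, using hypothetical names rather than the DRM fence API.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of merging several shared fences into one exclusive fence.
 * These names are hypothetical; this is not the DRM fence API. */
struct toy_fence {
        bool signaled;
};

static bool exclusive_ready(const struct toy_fence *fences, unsigned int count)
{
        for (unsigned int i = 0; i < count; i++)
                if (!fences[i].signaled)
                        return false;   /* a pending shared fence blocks the aggregate */
        return true;
}

int main(void)
{
        struct toy_fence shared[3] = { { true }, { false }, { true } };

        printf("exclusive ready: %d\n", exclusive_ready(shared, 3));    /* 0: one fence pending */
        shared[1].signaled = true;
        printf("exclusive ready: %d\n", exclusive_ready(shared, 3));    /* 1: all shared fences done */
        return 0;
}

In the real driver the aggregate fence is installed with reservation_object_add_excl_fence(), as the new __reservation_object_make_exclusive() above shows.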
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
                 break;
         case amd_pp_dpp_clock:
                 pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+                break;
         default:
                 return -EINVAL;
         }
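For context on the one-line powerplay change above: without the added break, the amd_pp_dpp_clock case falls through into default, so the function returns -EINVAL even though pclk_vol_table was assigned. A minimal standalone C sketch of the same fall-through pattern follows, with hypothetical names rather than the powerplay code.

#include <stdio.h>

/* Hypothetical stand-ins for the clock-type switch; not the powerplay code. */
enum clk_type { CLK_DISP, CLK_DPP };

static int lookup_table(enum clk_type type, const char **table)
{
        switch (type) {
        case CLK_DISP:
                *table = "disp_vol_table";
                break;
        case CLK_DPP:
                *table = "dpp_vol_table";
                /* missing break: control falls through into default */
        default:
                return -1;      /* stands in for -EINVAL */
        }
        return 0;
}

int main(void)
{
        const char *table = NULL;
        int ret = lookup_table(CLK_DPP, &table);

        /* Prints ret=-1 (failure) even though the table was assigned;
         * adding "break;" after the CLK_DPP assignment makes ret 0. */
        printf("CLK_DPP lookup: ret=%d table=%s\n", ret, table);
        return 0;
}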
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
         u16 data_offset, size;
         u8 frev, crev;
         struct ci_power_info *pi;
-        enum pci_bus_speed speed_cap;
+        enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
         struct pci_dev *root = rdev->pdev->bus->self;
         int ret;
 
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
                 return -ENOMEM;
         rdev->pm.dpm.priv = pi;
 
-        speed_cap = pcie_get_speed_cap(root);
+        if (!pci_is_root_bus(rdev->pdev->bus))
+                speed_cap = pcie_get_speed_cap(root);
         if (speed_cap == PCI_SPEED_UNKNOWN) {
                 pi->sys_pcie_mask = 0;
         } else {
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
         struct ni_power_info *ni_pi;
         struct si_power_info *si_pi;
         struct atom_clock_dividers dividers;
-        enum pci_bus_speed speed_cap;
+        enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
         struct pci_dev *root = rdev->pdev->bus->self;
         int ret;
 
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
         eg_pi = &ni_pi->eg;
         pi = &eg_pi->rv7xx;
 
-        speed_cap = pcie_get_speed_cap(root);
+        if (!pci_is_root_bus(rdev->pdev->bus))
+                speed_cap = pcie_get_speed_cap(root);
         if (speed_cap == PCI_SPEED_UNKNOWN) {
                 si_pi->sys_pcie_mask = 0;
         } else {
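The radeon hunks in ci_dpm_init() and si_dpm_init() all follow one pattern: when the GPU is passed through to a guest VM it can sit directly on a root bus, where rdev->pdev->bus->self (the upstream bridge) is NULL, so the old unconditional pcie_get_speed_cap(root) call dereferenced a NULL pointer. The fix defaults speed_cap to PCI_SPEED_UNKNOWN and only queries the bridge when the device is not on the root bus, which also keeps the existing fallback (sys_pcie_mask = 0) working. Below is a minimal standalone C sketch of that defensive-default pattern, using hypothetical types rather than the kernel's PCI API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the PCI structures involved; not the kernel API. */
enum bus_speed { SPEED_UNKNOWN = 0, SPEED_GEN3 = 3 };

struct bridge { enum bus_speed cap; };
struct bus    { struct bridge *self; };  /* self == NULL on a root bus (e.g. VM passthrough) */
struct device { struct bus *bus; };

static bool is_root_bus(const struct bus *bus)
{
        return bus->self == NULL;
}

/* Mirrors pcie_get_speed_cap(root): dereferences the bridge unconditionally. */
static enum bus_speed get_speed_cap(const struct bridge *bridge)
{
        return bridge->cap;
}

static enum bus_speed probe_speed_cap(const struct device *dev)
{
        enum bus_speed speed_cap = SPEED_UNKNOWN;       /* safe default, as in the fix */

        if (!is_root_bus(dev->bus))                     /* only touch the bridge when it exists */
                speed_cap = get_speed_cap(dev->bus->self);
        return speed_cap;
}

int main(void)
{
        struct bus passthrough_bus = { .self = NULL };  /* passthrough: no upstream bridge */
        struct device gpu = { .bus = &passthrough_bus };

        /* Prints 0 (SPEED_UNKNOWN) instead of crashing on a NULL bridge. */
        printf("speed cap: %d\n", (int)probe_speed_cap(&gpu));
        return 0;
}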