i915, gvt, nouveau, udl and etnaviv fixes
-----BEGIN PGP SIGNATURE-----
iQIcBAABAgAGBQJY7ulCAAoJEAx081l5xIa+vZ4P+wSZ41DGfCYO/k5Rg+5+QHW6
k7rPj5m0G/lEKJIb23+3p4SktcoQZpaiic6EtTRDeAStseQodaAFypmXs4uyt3Gs
5/RQelGtJD7xHHq4hYbLNaINBeazE3XErZ1AOC1HRxFpq80qJTeO8AbJez0eF8WK
gBi6A6pSzRIHKsqRE7wPertRm1qpdjOB0dl045ZmRD75AP11+OU1oHjcRv0c+cld
vWfItHpp/sDgcXNGJU3UVbjO6VVfkQjIfPxFNIp/prPCr82S+VoS5VEYe7kz1BY4
Nt3tFzfH50vt7nLr/4V0zKyPYnWwe4Y7ATo5WVCY1FFgwuZvvt7jsC5Q10wNkUMU
OjKf7R5hnMkinOuVBmR/Wdgt1aOtwtUD72oF9YHGIVTsGegkcxHxwyhIxNQ538Bx
tMo5rB4oyp74ov9petc8HhE0jAOtUldxzqpS18ogFgdD3+BDSK2Tm46g3vDP1qdk
I8LFA+4Xwx3EseZhWadtTz5RjeWT9MIUqhnsBWFAWiBPJmm0VgS60VouF1p5LZbC
FtoOoGvhnuClmpo4DV/55dzQqbo2Xct6T9no6TSJimN9xN/4T4M0afr0gN5qea+Z
J93zrTy3GkmdBscZOWbEhYSFc4aFX9prDUPSootqNHA7xXywmFNX41y6JBGCfir3
tU7JYihscCK6rW/+RCwz
=A/S/
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.11-rc7' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "i915, gvt, nouveau, udl and etnaviv fixes.

  I was away at the end of last week, so some of these would have been
  in rc6, and it's Easter from tomorrow, so I decided I'd better
  dequeue what I have now.

  The nouveau changes just add a hw enable for GP107 display (really
  little more than a PCI ID addition) and fix a couple of regressions.

  i915 has some more gvt fixes along with a few run-of-the-mill ones;
  a few people seem to have hit the rcu one.

  Otherwise, a small udl fix and a small etnaviv fix"

* tag 'drm-fixes-for-v4.11-rc7' of git://people.freedesktop.org/~airlied/linux: (22 commits)
  drm/etnaviv: fix missing unlock on error in etnaviv_gpu_submit()
  drm/udl: Fix unaligned memory access in udl_render_hline
  drm/i915: Don't call synchronize_rcu_expedited under struct_mutex
  drm/i915: Suspend GuC prior to GPU Reset during GEM suspend
  drm/nouveau: initial support (display-only) for GP107
  drm/nouveau/kms/nv50: fix double dma_fence_put() when destroying plane state
  drm/nouveau/kms/nv50: fix setting of HeadSetRasterVertBlankDmi method
  drm/nouveau/mmu/nv4a: use nv04 mmu rather than the nv44 one
  drm/nouveau/mpeg: mthd returns true on success now
  drm/i915/gvt: set the correct default value of CTX STATUS PTR
  drm/i915/gvt: Fix firmware loading interface for GVT-g golden HW state
  drm/i915: Use a dummy timeline name for a signaled fence
  drm/i915: Ironlake do_idle_maps w/a may be called w/o struct_mutex
  drm/i915/gvt: remove the redundant info NULL check
  drm/i915/gvt: adjust mem size for low resolution type
  drm/i915: Avoid lock dropping between rescheduling
  drm/i915/gvt: exclude cfg space from failsafe mode
  drm/i915/gvt: Activate/de-activate vGPU in mdev ops.
  drm/i915/execlists: Wrap tail pointer after reset tweaking
  drm/i915/perf: remove user triggerable warn
  ...
commit ee921c762c
@@ -1317,7 +1317,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	if (!fence) {
 		event_free(gpu, event);
 		ret = -ENOMEM;
-		goto out_pm_put;
+		goto out_unlock;
 	}
 
 	gpu->event[event].fence = fence;

@@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	hangcheck_timer_reset(gpu);
 	ret = 0;
 
+out_unlock:
 	mutex_unlock(&gpu->lock);
 
 out_pm_put:
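The etnaviv fix above is the classic goto-ladder unwind: an error taken while gpu->lock is still held must leave through a label that drops the lock before the runtime-PM reference is released, instead of jumping straight to out_pm_put. A minimal, self-contained sketch of that idiom follows, using made-up names (pm_get, queue_locked and friends) and a pthread mutex rather than the driver's own structures:

#include <errno.h>
#include <pthread.h>

/* Illustrative context only; these are not the etnaviv structures. */
struct ctx {
	pthread_mutex_t lock;
	int pm_refs;
};

static int  pm_get(struct ctx *c)  { c->pm_refs++; return 0; }
static void pm_put(struct ctx *c)  { c->pm_refs--; }
static int  queue_locked(struct ctx *c) { (void)c; return -ENOMEM; }

static int do_submit(struct ctx *c)
{
	int ret = pm_get(c);
	if (ret)
		return ret;

	pthread_mutex_lock(&c->lock);

	ret = queue_locked(c);
	if (ret)
		goto out_unlock;	/* the lock is held here: unlock first */

	ret = 0;
out_unlock:
	pthread_mutex_unlock(&c->lock);
	pm_put(c);			/* undone last, in reverse order of acquisition */
	return ret;
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 0 };
	return do_submit(&c) == -ENOMEM ? 0 : 1;
}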
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
 	int ret;
 
-	if (vgpu->failsafe)
-		return 0;
-
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
@@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 		_EL_OFFSET_STATUS_PTR);
 
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
-	ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+	ctx_status_ptr.read_ptr = 0;
+	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
-	unsigned long size;
+	unsigned long size, crc32_start;
 	int i;
 	int ret;
 
-	size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
 	firmware = vzalloc(size);
 	if (!firmware)
 		return -ENOMEM;

@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
+	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+	h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
 	firmware_attr.size = size;
 	firmware_attr.private = firmware;
 

@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
 	firmware->mmio = mem;
 
-	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
 		 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
 		 pdev->revision);
 
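The crc32_start arithmetic in the firmware hunk above skips the header bytes up to and including the 4-byte crc32 field itself, so the stored checksum never covers its own storage. A small sketch of that offsetof pattern, with a made-up header layout standing in for the real struct gvt_firmware_header:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout; the real struct gvt_firmware_header has more fields. */
struct fw_header {
	uint64_t magic;
	uint32_t crc32;		/* covers everything *after* this field */
	uint32_t version;
	/* the MMIO/cfg-space snapshot follows the header in the blob */
};

int main(void)
{
	/* Skip everything up to and including the crc32 field, so the
	 * checksum never includes itself; in the driver this becomes
	 *   h->crc32 = crc32_le(0, blob + crc_start, size - crc_start);  */
	size_t crc_start = offsetof(struct fw_header, crc32) + sizeof(uint32_t);

	printf("checksum starts at byte %zu of the blob\n", crc_start);
	return 0;
}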
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.vgpu_create = intel_gvt_create_vgpu,
 	.vgpu_destroy = intel_gvt_destroy_vgpu,
 	.vgpu_reset = intel_gvt_reset_vgpu,
+	.vgpu_activate = intel_gvt_activate_vgpu,
+	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
 };
 
 /**
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 				 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
 
 /* validating GM functions */
 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \

@@ -449,6 +450,8 @@ struct intel_gvt_ops {
 			struct intel_vgpu_type *);
 	void (*vgpu_destroy)(struct intel_vgpu *);
 	void (*vgpu_reset)(struct intel_vgpu *);
+	void (*vgpu_activate)(struct intel_vgpu *);
+	void (*vgpu_deactivate)(struct intel_vgpu *);
 };
 
 
@@ -544,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 	if (ret)
 		goto undo_group;
 
+	intel_gvt_ops->vgpu_activate(vgpu);
+
 	atomic_set(&vgpu->vdev.released, 0);
 	return ret;
 

@@ -569,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
 		return;
 
+	intel_gvt_ops->vgpu_deactivate(vgpu);
+
 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
 	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

@@ -1340,13 +1344,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-	struct intel_vgpu *vgpu = info->vgpu;
-
-	if (!info) {
-		gvt_vgpu_err("kvmgt_guest_info invalid\n");
-		return false;
-	}
-
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 	kvm_put_kvm(info->kvm);
 	kvmgt_protect_table_destroy(info);
@@ -72,7 +72,7 @@ static struct {
 	char *name;
 } vgpu_types[] = {
 /* Fixed vGPU type table */
-	{ MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
 	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
 	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
 	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },

@@ -178,6 +178,47 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 	}
 }
 
+/**
+ * intel_gvt_active_vgpu - activate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to activate a virtual GPU.
+ *
+ */
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+	mutex_lock(&vgpu->gvt->lock);
+	vgpu->active = true;
+	mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactive_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+
+	mutex_lock(&gvt->lock);
+
+	vgpu->active = false;
+
+	if (atomic_read(&vgpu->running_workload_num)) {
+		mutex_unlock(&gvt->lock);
+		intel_gvt_wait_vgpu_idle(vgpu);
+		mutex_lock(&gvt->lock);
+	}
+
+	intel_vgpu_stop_schedule(vgpu);
+
+	mutex_unlock(&gvt->lock);
+}
+
 /**
  * intel_gvt_destroy_vgpu - destroy a virtual GPU
  * @vgpu: virtual GPU

@@ -191,16 +232,9 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 
 	mutex_lock(&gvt->lock);
 
-	vgpu->active = false;
-	idr_remove(&gvt->vgpu_idr, vgpu->id);
-
-	if (atomic_read(&vgpu->running_workload_num)) {
-		mutex_unlock(&gvt->lock);
-		intel_gvt_wait_vgpu_idle(vgpu);
-		mutex_lock(&gvt->lock);
-	}
+	WARN(vgpu->active, "vGPU is still active!\n");
 
-	intel_vgpu_stop_schedule(vgpu);
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_gvt_context(vgpu);
 	intel_vgpu_clean_execlist(vgpu);

@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_shadow_ctx;
 
-	vgpu->active = true;
 	mutex_unlock(&gvt->lock);
 
 	return vgpu;
@@ -1434,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev)
 		goto out;
 	}
 
-	intel_guc_suspend(dev_priv);
-
 	intel_display_suspend(dev);
 
 	intel_dp_mst_suspend(dev);
@@ -806,6 +806,7 @@ struct intel_csr {
 	func(has_resource_streamer); \
 	func(has_runtime_pm); \
 	func(has_snoop); \
+	func(unfenced_needs_alignment); \
 	func(cursor_needs_physical); \
 	func(hws_needs_physical); \
 	func(overlay_needs_physical); \
@@ -4348,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
+	intel_guc_suspend(dev_priv);
+
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 
@@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
 	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+	bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
 	int retry;
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

@@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 		if (!has_fenced_gpu_access)
 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
-			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+			(entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+			 needs_unfenced_map) &&
 			i915_gem_object_is_tiled(obj);
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
@@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+		if (i915_gem_wait_for_idle(dev_priv, 0)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+	/* The timeline struct (as part of the ppgtt underneath a context)
+	 * may be freed when the request is no longer in use by the GPU.
+	 * We could extend the life of a context to beyond that of all
+	 * fences, possibly keeping the hw resource around indefinitely,
+	 * or we just give them a false name. Since
+	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+	 * lie seems justifiable.
+	 */
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return "signaled";
+
 	return to_request(fence)->timeline->common->name;
 }
 
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 	BUG();
 }
 
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+	if (!unlock)
+		return;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	/* expedite the RCU grace period to free some request slabs */
+	synchronize_rcu_expedited();
+}
+
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;

@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		intel_runtime_pm_put(dev_priv);
 
 	i915_gem_retire_requests(dev_priv);
-	if (unlock)
-		mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	/* expedite the RCU grace period to free some request slabs */
-	synchronize_rcu_expedited();
+	i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
 
 	return count;
 }

@@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+	i915_gem_shrinker_unlock(dev, unlock);
 
 	return count;
 }

@@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 					 sc->nr_to_scan - freed,
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+
+	i915_gem_shrinker_unlock(dev, unlock);
 
 	return freed;
 }

@@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
 					  struct shrinker_lock_uninterruptible *slu)
 {
 	dev_priv->mm.interruptible = slu->was_interruptible;
-	if (slu->unlock)
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
 }
 
 static int
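The shrinker change above moves the expedited RCU grace period out of the struct_mutex critical section: i915_gem_shrinker_unlock drops the mutex first and only then calls synchronize_rcu_expedited, so the slow wait no longer runs with the lock held. A rough userspace sketch of that "unlock, then block" shape, with stand-in names and a pthread mutex in place of struct_mutex:

#include <pthread.h>

/* Stand-in for the slow call that must not run under the lock; in the
 * driver this is synchronize_rcu_expedited(). Names are illustrative. */
static void slow_grace_period(void) { }

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Same shape as the unlock helper: release the lock first, only then block. */
static void shrinker_unlock(int locked)
{
	if (!locked)
		return;

	pthread_mutex_unlock(&big_lock);
	slow_grace_period();
}

static unsigned long shrinker_scan(void)
{
	int locked = (pthread_mutex_trylock(&big_lock) == 0);
	unsigned long freed = 0;

	if (locked)
		freed = 42;		/* pretend some pages were reclaimed */

	shrinker_unlock(locked);
	return freed;
}

int main(void)
{
	return (int)!shrinker_scan();
}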
@@ -60,6 +60,7 @@
 	.has_overlay = 1, .overlay_needs_physical = 1, \
 	.has_gmch_display = 1, \
 	.hws_needs_physical = 1, \
+	.unfenced_needs_alignment = 1, \
 	.ring_mask = RENDER_RING, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	CURSOR_OFFSETS

@@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = {
 	.platform = INTEL_I915G, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.hws_needs_physical = 1,
+	.unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i915gm_info = {

@@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = {
 	.supports_tv = 1,
 	.has_fbc = 1,
 	.hws_needs_physical = 1,
+	.unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945g_info = {

@@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = {
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.hws_needs_physical = 1,
+	.unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945gm_info = {

@@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
 	.supports_tv = 1,
 	.has_fbc = 1,
 	.hws_needs_physical = 1,
+	.unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
 	 */
 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
 		ret = -ENODEV;
-		goto err_alloc;
+		goto err_flags;
 	}
 
 	list_add(&stream->link, &dev_priv->perf.streams);

@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
 
 err_open:
 	list_del(&stream->link);
+err_flags:
 	if (stream->ops->destroy)
 		stream->ops->destroy(stream);
 err_alloc:

@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
 		if (ret)
 			return ret;
 
+		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
+			DRM_DEBUG("Unknown i915 perf property ID\n");
+			return -EINVAL;
+		}
+
 		switch ((enum drm_i915_perf_property_id)id) {
 		case DRM_I915_PERF_PROP_CTX_HANDLE:
 			props->single_context = 1;

@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
 			props->oa_periodic = true;
 			props->oa_period_exponent = value;
 			break;
-		default:
+		case DRM_I915_PERF_PROP_MAX:
 			MISSING_CASE(id);
-			DRM_DEBUG("Unknown i915 perf property ID\n");
 			return -EINVAL;
 		}
 
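The perf hunks above add an explicit range check on the user-supplied property id before the switch, and replace the default arm with an explicit case for the _MAX sentinel so the compiler's switch-exhaustiveness warning keeps flagging properties that are added but not handled. A sketch of that pattern with a hypothetical enum (not the real drm_i915_perf_property_id values):

#include <stdio.h>

/* Hypothetical property ids; the trailing _MAX is a sentinel, not a real id. */
enum demo_prop_id {
	DEMO_PROP_CTX_HANDLE = 1,
	DEMO_PROP_OA_EXPONENT,
	DEMO_PROP_MAX		/* non-ABI: one past the last valid property */
};

static int read_property(unsigned long long id, unsigned long long value)
{
	/* Range-check the untrusted id before switching on it. */
	if (id == 0 || id >= DEMO_PROP_MAX) {
		fprintf(stderr, "unknown property id %llu\n", id);
		return -1;
	}

	switch ((enum demo_prop_id)id) {
	case DEMO_PROP_CTX_HANDLE:
		printf("ctx handle = %llu\n", value);
		break;
	case DEMO_PROP_OA_EXPONENT:
		printf("oa exponent = %llu\n", value);
		break;
	case DEMO_PROP_MAX:	/* unreachable, kept so -Wswitch stays exhaustive */
		return -1;
	}
	return 0;
}

int main(void)
{
	read_property(1, 42);	/* accepted */
	read_property(99, 0);	/* rejected by the range check */
	return 0;
}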
@@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 static struct intel_engine_cs *
 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 {
-	struct intel_engine_cs *engine;
+	struct intel_engine_cs *engine =
+		container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+
+	GEM_BUG_ON(!locked);
 
-	engine = container_of(pt,
-			      struct drm_i915_gem_request,
-			      priotree)->engine;
 	if (engine != locked) {
-		if (locked)
-			spin_unlock_irq(&locked->timeline->lock);
-		spin_lock_irq(&engine->timeline->lock);
+		spin_unlock(&locked->timeline->lock);
+		spin_lock(&engine->timeline->lock);
 	}
 
 	return engine;

@@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 
 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 {
-	struct intel_engine_cs *engine = NULL;
+	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
 	LIST_HEAD(dfs);

@@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
 		struct i915_priotree *pt = dep->signaler;
 
-		list_for_each_entry(p, &pt->signalers_list, signal_link)
+		/* Within an engine, there can be no cycle, but we may
+		 * refer to the same dependency chain multiple times
+		 * (redundant dependencies are not eliminated) and across
+		 * engines.
+		 */
+		list_for_each_entry(p, &pt->signalers_list, signal_link) {
+			GEM_BUG_ON(p->signaler->priority < pt->priority);
 			if (prio > READ_ONCE(p->signaler->priority))
 				list_move_tail(&p->dfs_link, &dfs);
+		}
 
 		list_safe_reset_next(dep, p, dfs_link);
-		if (!RB_EMPTY_NODE(&pt->node))
-			continue;
-
-		engine = pt_lock_engine(pt, engine);
-
-		/* If it is not already in the rbtree, we can update the
-		 * priority inplace and skip over it (and its dependencies)
-		 * if it is referenced *again* as we descend the dfs.
-		 */
-		if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
-			pt->priority = prio;
-			list_del_init(&dep->dfs_link);
-		}
 	}
 
+	engine = request->engine;
+	spin_lock_irq(&engine->timeline->lock);
+
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
 		struct i915_priotree *pt = dep->signaler;

@@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 		if (prio <= pt->priority)
 			continue;
 
-		GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
 		pt->priority = prio;
-		rb_erase(&pt->node, &engine->execlist_queue);
-		if (insert_request(pt, &engine->execlist_queue))
-			engine->execlist_first = &pt->node;
+		if (!RB_EMPTY_NODE(&pt->node)) {
+			rb_erase(&pt->node, &engine->execlist_queue);
+			if (insert_request(pt, &engine->execlist_queue))
+				engine->execlist_first = &pt->node;
+		}
 	}
 
-	if (engine)
-		spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline->lock);
 
 	/* XXX Do we need to preempt to make room for us and our deps? */
 }

@@ -1440,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	GEM_BUG_ON(request->ctx != port[0].request->ctx);
 
 	/* Reset WaIdleLiteRestore:bdw,skl as well */
-	request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
+	request->tail =
+		intel_ring_wrap(request->ring,
+				request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -521,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 */
 }
 
+static inline u32
+intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+	return pos & (ring->size - 1);
+}
+
 static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
 {
 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
 	u32 offset = addr - ring->vaddr;
-	return offset & (ring->size - 1);
+	return intel_ring_wrap(ring, offset);
 }
 
 int __intel_ring_space(int head, int tail, int size);
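The new intel_ring_wrap helper relies on the ring size being a power of two, so masking with size - 1 behaves like taking the position modulo the size, and (as the comment in intel_ring_offset notes) never hands the hardware the ring size itself in place of zero. A standalone sketch of the masking trick, independent of the i915 structures:

#include <assert.h>
#include <stdint.h>

/* For power-of-two sizes, (pos & (size - 1)) == (pos % size). */
static uint32_t ring_wrap(uint32_t size, uint32_t pos)
{
	assert((size & (size - 1)) == 0);	/* size must be a power of two */
	return pos & (size - 1);
}

int main(void)
{
	assert(ring_wrap(4096, 100) == 100);	/* in range: unchanged */
	assert(ring_wrap(4096, 4096) == 0);	/* wraps to the start, not 4096 */
	assert(ring_wrap(4096, 5000) == 904);	/* 5000 - 4096 */
	return 0;
}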
@@ -995,7 +995,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
 {
 	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
 	__drm_atomic_helper_plane_destroy_state(&asyw->state);
-	dma_fence_put(asyw->state.fence);
 	kfree(asyw);
 }
 

@@ -1007,7 +1006,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
 	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
 		return NULL;
 	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
-	asyw->state.fence = NULL;
 	asyw->interval = 1;
 	asyw->sema = armw->sema;
 	asyw->ntfy = armw->ntfy;

@@ -2036,6 +2034,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 	u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
 	u32 hfrontp = mode->hsync_start - mode->hdisplay;
 	u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+	u32 blankus;
 	struct nv50_head_mode *m = &asyh->mode;
 
 	m->h.active = mode->htotal;

@@ -2049,9 +2048,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 	m->v.blanks = m->v.active - vfrontp - 1;
 
 	/*XXX: Safe underestimate, even "0" works */
-	m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
-	m->v.blankus *= 1000;
-	m->v.blankus /= mode->clock;
+	blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+	blankus *= 1000;
+	blankus /= mode->clock;
+	m->v.blankus = blankus;
 
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		m->v.blank2e = m->v.active + m->v.synce + vbackp;
@@ -714,7 +714,7 @@ nv4a_chipset = {
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
 	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
+	.mmu = nv04_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
 	.timer = nv41_timer_new,

@@ -2271,6 +2271,35 @@ nv136_chipset = {
 	.fifo = gp100_fifo_new,
 };
 
+static const struct nvkm_device_chip
+nv137_chipset = {
+	.name = "GP107",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = gm200_devinit_new,
+	.fb = gp102_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gp100_ltc_new,
+	.mc = gp100_mc_new,
+	.mmu = gf100_mmu_new,
+	.pci = gp100_pci_new,
+	.pmu = gp102_pmu_new,
+	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
+	.ce[0] = gp102_ce_new,
+	.ce[1] = gp102_ce_new,
+	.ce[2] = gp102_ce_new,
+	.ce[3] = gp102_ce_new,
+	.disp = gp102_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gp100_fifo_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
 		       struct nvkm_notify *notify)

@@ -2708,6 +2737,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	case 0x132: device->chip = &nv132_chipset; break;
 	case 0x134: device->chip = &nv134_chipset; break;
 	case 0x136: device->chip = &nv136_chipset; break;
+	case 0x137: device->chip = &nv137_chipset; break;
 	default:
 		nvdev_error(device, "unknown chipset (%08x)\n", boot0);
 		goto done;
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
 	}
 
 	if (type == 0x00000010) {
-		if (!nv31_mpeg_mthd(mpeg, mthd, data))
+		if (nv31_mpeg_mthd(mpeg, mthd, data))
 			show &= ~0x01000000;
 	}
 }

@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
 	}
 
 	if (type == 0x00000010) {
-		if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+		if (nv44_mpeg_mthd(subdev->device, mthd, data))
 			show &= ~0x01000000;
 	}
 }
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/fb.h>
 #include <linux/prefetch.h>
+#include <asm/unaligned.h>
 
 #include <drm/drmP.h>
 #include "udl_drv.h"

@@ -163,7 +164,7 @@ static void udl_compress_hline16(
 		const u8 *const start = pixel;
 		const uint16_t repeating_pixel_val16 = pixel_val16;
 
-		*(uint16_t *)cmd = cpu_to_be16(pixel_val16);
+		put_unaligned_be16(pixel_val16, cmd);
 
 		cmd += 2;
 		pixel += bpp;
 
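put_unaligned_be16 writes the two bytes of the value individually, so it is safe even when cmd is not 2-byte aligned, whereas the cast-and-assign it replaces can fault (or be fixed up very slowly) on alignment-strict machines. A portable sketch of a byte-wise big-endian store that assumes nothing about the destination pointer:

#include <stdint.h>
#include <assert.h>

/* Write a 16-bit value big-endian, one byte at a time: no alignment needed. */
static void store_be16(uint8_t *dst, uint16_t val)
{
	dst[0] = (uint8_t)(val >> 8);	/* most significant byte first */
	dst[1] = (uint8_t)(val & 0xff);
}

int main(void)
{
	uint8_t buf[5] = { 0 };

	/* An odd offset like &buf[1] is exactly the case a plain
	 * *(uint16_t *)p = ... store is not guaranteed to handle. */
	store_be16(&buf[1], 0xF800);	/* e.g. RGB565 pure red */
	assert(buf[1] == 0xF8 && buf[2] == 0x00);
	return 0;
}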