nouveau features, i915 + amdgpu fixes
-----BEGIN PGP SIGNATURE----- iQIcBAABAgAGBQJae8OFAAoJEAx081l5xIa+mIUP/0leefSxgD4GTAAO5nQDIwTX TLnFP52i0/wrQ1T1CKkBCTnc8yRo4OSH3KMqnwHppBRGinYVRlz404pEckw3yUYq kTFS6ZKlfjZRgo7UIia49UlbDWse6aK6VUFwtyyb9et62rlTE0nmLXLHdKHuTnzi DxxMvmdDKWn9q/he5nHKg3d9H3ICc/EWINEqlxKIrX4Zgk/ymq/95rZNY0tOvuFa 1WSFAl0IuCR330trgpN4kOLuCno/W0MuQFVJ4ymgeMW8ZhjM4UTjOANAm/8wZfmo Dau16psa18iE/kdz+iobdC1nzAS1VdMYXLv7HepLouYXByd6o2Xc6TMvBO0d9NxV JiLpntzdnmGHE0y/5GgMPJ5+8CCNzaI0ASqPbNvKVSB08cZB0hvYiVQdLSGAMLoY DiNwsgT+Pk+OXddvR+i8WdAUfU9aOKhl01bFlPWheXyZdAkGwvbBb4xQ6A11U5C2 HUW1ZKPE0M4yGblnQpAulw7wcYEGHs0xMIfG8RwLGR0FazSsW2Rk8GKbMapEvhUx Ge3pvB51u70L/q1X1POy/q9+ITs82KXr5T+cjpdo+yOxq1JbfgQWdSlCIXH4Ptlf h53HWbJOu5JUWjI2FiePHwmjhxwxT01ManUThrlYJ4OR+5LyWbA1y0m5c1FV2zFd p82ux/7cSmaE6hN8LsdF =857C -----END PGP SIGNATURE----- Merge tag 'drm-for-v4.16-part2-fixes' of git://people.freedesktop.org/~airlied/linux Pull more drm updates from Dave Airlie: "Ben missed sending his nouveau tree, but he really didn't have much stuff in it: - GP108 acceleration support is enabled by "secure boot" support - some clockgating work on Kepler, and bunch of fixes - the bulk of the diff is regenerated firmware files, the change to them really isn't that large. Otherwise this contains regular Intel and AMDGPU fixes" * tag 'drm-for-v4.16-part2-fixes' of git://people.freedesktop.org/~airlied/linux: (59 commits) drm/i915/bios: add DP max link rate to VBT child device struct drm/i915/cnp: Properly handle VBT ddc pin out of bounds. drm/i915/cnp: Ignore VBT request for know invalid DDC pin. drm/i915/cmdparser: Do not check past the cmd length. drm/i915/cmdparser: Check reg_table_count before derefencing. drm/i915/bxt, glk: Increase PCODE timeouts during CDCLK freq changing drm/i915/gvt: Use KVM r/w to access guest opregion drm/i915/gvt: Fix aperture read/write emulation when enable x-no-mmap=on drm/i915/gvt: only reset execlist state of one engine during VM engine reset drm/i915/gvt: refine intel_vgpu_submission_ops as per engine ops drm/amdgpu: re-enable CGCG on CZ and disable on ST drm/nouveau/clk: fix gcc-7 -Wint-in-bool-context warning drm/nouveau/mmu: Fix trailing semicolon drm/nouveau: Introduce NvPmEnableGating option drm/nouveau: Add support for SLCG for Kepler2 drm/nouveau: Add support for BLCG on Kepler2 drm/nouveau: Add support for BLCG on Kepler1 drm/nouveau: Add support for basic clockgating on Kepler1 drm/nouveau/kms/nv50: fix handling of gamma since atomic conversion drm/nouveau/kms/nv50: use INTERPOLATE_257_UNITY_RANGE LUT on newer chipsets ...
This commit is contained in:
commit
fe26adf431
|
@ -179,8 +179,12 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
|
|||
|
||||
amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
|
||||
|
||||
/* Using pipes 2/3 from MEC 2 seems cause problems */
|
||||
if (mec == 1 && pipe > 1)
|
||||
/*
|
||||
* 1. Using pipes 2/3 from MEC 2 seems cause problems.
|
||||
* 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
|
||||
* only can be issued on queue 0.
|
||||
*/
|
||||
if ((mec == 1 && pipe > 1) || queue != 0)
|
||||
continue;
|
||||
|
||||
ring->me = mec + 1;
|
||||
|
|
|
@ -2262,12 +2262,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|||
{
|
||||
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
|
||||
AMDGPU_VM_PTE_COUNT(adev) * 8);
|
||||
uint64_t init_pde_value = 0, flags;
|
||||
unsigned ring_instance;
|
||||
struct amdgpu_ring *ring;
|
||||
struct drm_sched_rq *rq;
|
||||
unsigned long size;
|
||||
int r, i;
|
||||
u64 flags;
|
||||
uint64_t init_pde_value = 0;
|
||||
|
||||
vm->va = RB_ROOT_CACHED;
|
||||
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
|
||||
|
@ -2318,29 +2318,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|||
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
|
||||
AMDGPU_GEM_CREATE_SHADOW);
|
||||
|
||||
r = amdgpu_bo_create(adev,
|
||||
amdgpu_vm_bo_size(adev, adev->vm_manager.root_level),
|
||||
align, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
flags,
|
||||
NULL, NULL, init_pde_value, &vm->root.base.bo);
|
||||
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
|
||||
r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
flags, NULL, NULL, init_pde_value,
|
||||
&vm->root.base.bo);
|
||||
if (r)
|
||||
goto error_free_sched_entity;
|
||||
|
||||
r = amdgpu_bo_reserve(vm->root.base.bo, true);
|
||||
if (r)
|
||||
goto error_free_root;
|
||||
|
||||
vm->root.base.vm = vm;
|
||||
list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
|
||||
INIT_LIST_HEAD(&vm->root.base.vm_status);
|
||||
|
||||
if (vm->use_cpu_for_update) {
|
||||
r = amdgpu_bo_reserve(vm->root.base.bo, false);
|
||||
if (r)
|
||||
goto error_free_root;
|
||||
|
||||
r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
|
||||
amdgpu_bo_unreserve(vm->root.base.bo);
|
||||
if (r)
|
||||
goto error_free_root;
|
||||
}
|
||||
list_add_tail(&vm->root.base.vm_status, &vm->evicted);
|
||||
amdgpu_bo_unreserve(vm->root.base.bo);
|
||||
|
||||
if (pasid) {
|
||||
unsigned long flags;
|
||||
|
|
|
@ -278,9 +278,9 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
|
|||
/* Track retry faults in per-VM fault FIFO. */
|
||||
spin_lock(&adev->vm_manager.pasid_lock);
|
||||
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
if (WARN_ON_ONCE(!vm)) {
|
||||
if (!vm) {
|
||||
/* VM not found, process it normally */
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
amdgpu_ih_clear_fault(adev, key);
|
||||
return true;
|
||||
}
|
||||
|
@ -288,9 +288,11 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
|
|||
r = kfifo_put(&vm->faults, key);
|
||||
if (!r) {
|
||||
/* FIFO is full. Ignore it until there is space */
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
amdgpu_ih_clear_fault(adev, key);
|
||||
goto ignore_iv;
|
||||
}
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
|
||||
/* It's the first fault for this address, process it normally */
|
||||
return true;
|
||||
|
|
|
@ -1049,7 +1049,6 @@ static int vi_common_early_init(void *handle)
|
|||
AMD_CG_SUPPORT_GFX_CP_LS |
|
||||
AMD_CG_SUPPORT_GFX_CGTS |
|
||||
AMD_CG_SUPPORT_GFX_CGTS_LS |
|
||||
AMD_CG_SUPPORT_GFX_CGCG |
|
||||
AMD_CG_SUPPORT_GFX_CGLS |
|
||||
AMD_CG_SUPPORT_BIF_LS |
|
||||
AMD_CG_SUPPORT_HDP_MGCG |
|
||||
|
|
|
@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
|
|||
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
|
||||
return 0;
|
||||
|
||||
if (map) {
|
||||
vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
|
||||
MEMREMAP_WC);
|
||||
if (!vgpu->gm.aperture_va)
|
||||
return -ENOMEM;
|
||||
} else {
|
||||
memunmap(vgpu->gm.aperture_va);
|
||||
vgpu->gm.aperture_va = NULL;
|
||||
}
|
||||
|
||||
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
|
||||
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
|
||||
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
|
||||
|
@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
|
|||
aperture_pa >> PAGE_SHIFT,
|
||||
aperture_sz >> PAGE_SHIFT,
|
||||
map);
|
||||
if (ret) {
|
||||
memunmap(vgpu->gm.aperture_va);
|
||||
vgpu->gm.aperture_va = NULL;
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
|
||||
return 0;
|
||||
|
|
|
@ -472,7 +472,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
|
|||
ret = PTR_ERR(dmabuf);
|
||||
goto out_free_gem;
|
||||
}
|
||||
obj->base.dma_buf = dmabuf;
|
||||
|
||||
i915_gem_object_put(obj);
|
||||
|
||||
|
|
|
@ -521,24 +521,23 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
|
|||
|
||||
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
|
||||
_EL_OFFSET_STATUS_PTR);
|
||||
|
||||
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
|
||||
ctx_status_ptr.read_ptr = 0;
|
||||
ctx_status_ptr.write_ptr = 0x7;
|
||||
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
|
||||
}
|
||||
|
||||
static void clean_execlist(struct intel_vgpu *vgpu)
|
||||
static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
|
||||
{
|
||||
enum intel_engine_id i;
|
||||
unsigned int tmp;
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
struct intel_engine_cs *engine;
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
|
||||
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
|
||||
kfree(s->ring_scan_buffer[i]);
|
||||
s->ring_scan_buffer[i] = NULL;
|
||||
s->ring_scan_buffer_size[i] = 0;
|
||||
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
|
||||
kfree(s->ring_scan_buffer[engine->id]);
|
||||
s->ring_scan_buffer[engine->id] = NULL;
|
||||
s->ring_scan_buffer_size[engine->id] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -553,9 +552,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
|
|||
init_vgpu_execlist(vgpu, engine->id);
|
||||
}
|
||||
|
||||
static int init_execlist(struct intel_vgpu *vgpu)
|
||||
static int init_execlist(struct intel_vgpu *vgpu,
|
||||
unsigned long engine_mask)
|
||||
{
|
||||
reset_execlist(vgpu, ALL_ENGINES);
|
||||
reset_execlist(vgpu, engine_mask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
|
|||
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
||||
{
|
||||
struct intel_vgpu *vgpu = spt->vgpu;
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
||||
struct intel_vgpu_ppgtt_spt *s;
|
||||
struct intel_gvt_gtt_entry se, ge;
|
||||
unsigned long i;
|
||||
unsigned long gfn, i;
|
||||
int ret;
|
||||
|
||||
trace_spt_change(spt->vgpu->id, "born", spt,
|
||||
|
@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
|||
|
||||
if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
|
||||
for_each_present_guest_entry(spt, &ge, i) {
|
||||
ret = gtt_entry_p2m(vgpu, &ge, &se);
|
||||
if (ret)
|
||||
goto fail;
|
||||
gfn = ops->get_pfn(&ge);
|
||||
if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
|
||||
gtt_entry_p2m(vgpu, &ge, &se))
|
||||
ops->set_pfn(&se, gvt->gtt.scratch_mfn);
|
||||
ppgtt_set_shadow_entry(spt, &se, i);
|
||||
}
|
||||
return 0;
|
||||
|
@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
|
|||
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
|
||||
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
||||
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
|
||||
unsigned long gma;
|
||||
unsigned long gma, gfn;
|
||||
struct intel_gvt_gtt_entry e, m;
|
||||
int ret;
|
||||
|
||||
|
@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
|
|||
bytes);
|
||||
|
||||
if (ops->test_present(&e)) {
|
||||
gfn = ops->get_pfn(&e);
|
||||
|
||||
/* one PTE update may be issued in multiple writes and the
|
||||
* first write may not construct a valid gfn
|
||||
*/
|
||||
if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
|
||||
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = gtt_entry_p2m(vgpu, &e, &m);
|
||||
if (ret) {
|
||||
gvt_vgpu_err("fail to translate guest gtt entry\n");
|
||||
|
@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
|
|||
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
|
||||
}
|
||||
|
||||
out:
|
||||
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
|
||||
gtt_invalidate(gvt->dev_priv);
|
||||
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
|
||||
|
|
|
@ -82,7 +82,6 @@ struct intel_gvt_device_info {
|
|||
struct intel_vgpu_gm {
|
||||
u64 aperture_sz;
|
||||
u64 hidden_sz;
|
||||
void *aperture_va;
|
||||
struct drm_mm_node low_gm_node;
|
||||
struct drm_mm_node high_gm_node;
|
||||
};
|
||||
|
@ -127,7 +126,6 @@ struct intel_vgpu_irq {
|
|||
struct intel_vgpu_opregion {
|
||||
bool mapped;
|
||||
void *va;
|
||||
void *va_gopregion;
|
||||
u32 gfn[INTEL_GVT_OPREGION_PAGES];
|
||||
};
|
||||
|
||||
|
@ -152,8 +150,8 @@ enum {
|
|||
|
||||
struct intel_vgpu_submission_ops {
|
||||
const char *name;
|
||||
int (*init)(struct intel_vgpu *vgpu);
|
||||
void (*clean)(struct intel_vgpu *vgpu);
|
||||
int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
|
||||
void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
|
||||
void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
|
||||
};
|
||||
|
||||
|
|
|
@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
|||
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
u32 data = *(u32 *)p_data;
|
||||
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
|
||||
bool enable_execlist;
|
||||
|
@ -1523,11 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
|||
if (!enable_execlist)
|
||||
return 0;
|
||||
|
||||
if (s->active)
|
||||
return 0;
|
||||
|
||||
ret = intel_vgpu_select_submission_ops(vgpu,
|
||||
INTEL_VGPU_EXECLIST_SUBMISSION);
|
||||
ENGINE_MASK(ring_id),
|
||||
INTEL_VGPU_EXECLIST_SUBMISSION);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -2843,6 +2840,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
|||
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
|
||||
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
|
||||
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
|
||||
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
|
||||
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
|
||||
MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
|
||||
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
|
||||
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
|
||||
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
|
||||
|
|
|
@ -58,6 +58,7 @@ struct intel_gvt_mpt {
|
|||
int (*set_opregion)(void *vgpu);
|
||||
int (*get_vfio_device)(void *vgpu);
|
||||
void (*put_vfio_device)(void *vgpu);
|
||||
bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
|
||||
};
|
||||
|
||||
extern struct intel_gvt_mpt xengt_mpt;
|
||||
|
|
|
@ -651,6 +651,39 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
|
||||
{
|
||||
return off >= vgpu_aperture_offset(vgpu) &&
|
||||
off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
|
||||
}
|
||||
|
||||
static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
|
||||
void *buf, unsigned long count, bool is_write)
|
||||
{
|
||||
void *aperture_va;
|
||||
|
||||
if (!intel_vgpu_in_aperture(vgpu, off) ||
|
||||
!intel_vgpu_in_aperture(vgpu, off + count)) {
|
||||
gvt_vgpu_err("Invalid aperture offset %llu\n", off);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
|
||||
ALIGN_DOWN(off, PAGE_SIZE),
|
||||
count + offset_in_page(off));
|
||||
if (!aperture_va)
|
||||
return -EIO;
|
||||
|
||||
if (is_write)
|
||||
memcpy(aperture_va + offset_in_page(off), buf, count);
|
||||
else
|
||||
memcpy(buf, aperture_va + offset_in_page(off), count);
|
||||
|
||||
io_mapping_unmap(aperture_va);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
|
||||
size_t count, loff_t *ppos, bool is_write)
|
||||
{
|
||||
|
@ -679,8 +712,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
|
|||
buf, count, is_write);
|
||||
break;
|
||||
case VFIO_PCI_BAR2_REGION_INDEX:
|
||||
ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
|
||||
buf, count, is_write);
|
||||
ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
|
||||
break;
|
||||
case VFIO_PCI_BAR1_REGION_INDEX:
|
||||
case VFIO_PCI_BAR3_REGION_INDEX:
|
||||
|
@ -1575,6 +1607,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
|
|||
return PFN_DOWN(__pa(addr));
|
||||
}
|
||||
|
||||
static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
|
||||
{
|
||||
struct kvmgt_guest_info *info;
|
||||
struct kvm *kvm;
|
||||
|
||||
if (!handle_valid(handle))
|
||||
return false;
|
||||
|
||||
info = (struct kvmgt_guest_info *)handle;
|
||||
kvm = info->kvm;
|
||||
|
||||
return kvm_is_visible_gfn(kvm, gfn);
|
||||
|
||||
}
|
||||
|
||||
struct intel_gvt_mpt kvmgt_mpt = {
|
||||
.host_init = kvmgt_host_init,
|
||||
.host_exit = kvmgt_host_exit,
|
||||
|
@ -1590,6 +1637,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
|
|||
.set_opregion = kvmgt_set_opregion,
|
||||
.get_vfio_device = kvmgt_get_vfio_device,
|
||||
.put_vfio_device = kvmgt_put_vfio_device,
|
||||
.is_valid_gfn = kvmgt_is_valid_gfn,
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(kvmgt_mpt);
|
||||
|
||||
|
|
|
@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
|
|||
(reg >= gvt->device_info.gtt_start_offset \
|
||||
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
|
||||
|
||||
static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
|
||||
{
|
||||
u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
|
||||
u64 aperture_sz = vgpu_aperture_sz(vgpu);
|
||||
|
||||
return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
|
||||
}
|
||||
|
||||
static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
|
||||
void *pdata, unsigned int size, bool is_read)
|
||||
{
|
||||
u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
|
||||
u64 offset = gpa - aperture_gpa;
|
||||
|
||||
if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
|
||||
gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
|
||||
offset, size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!vgpu->gm.aperture_va) {
|
||||
gvt_vgpu_err("BAR is not enabled\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
if (is_read)
|
||||
memcpy(pdata, vgpu->gm.aperture_va + offset, size);
|
||||
else
|
||||
memcpy(vgpu->gm.aperture_va + offset, pdata, size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
|
||||
void *p_data, unsigned int bytes, bool read)
|
||||
{
|
||||
|
@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
|
|||
}
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
if (vgpu_gpa_is_aperture(vgpu, pa)) {
|
||||
ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
|
||||
goto out;
|
||||
}
|
||||
|
||||
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
|
||||
|
||||
if (WARN_ON(bytes > 8))
|
||||
|
@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
|
|||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
if (vgpu_gpa_is_aperture(vgpu, pa)) {
|
||||
ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
|
||||
goto out;
|
||||
}
|
||||
|
||||
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
|
||||
|
||||
if (WARN_ON(bytes > 8))
|
||||
|
|
|
@ -80,7 +80,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
|
|||
{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
|
||||
{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
|
||||
{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
|
||||
{ /* Terminated */ }
|
||||
{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
|
||||
};
|
||||
|
||||
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
|
||||
|
@ -146,7 +146,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
|
|||
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
|
||||
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
|
||||
{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
|
||||
{ /* Terminated */ }
|
||||
{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
|
||||
};
|
||||
|
||||
static struct {
|
||||
|
@ -167,7 +167,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
|
|||
};
|
||||
int ring_id, i;
|
||||
|
||||
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
|
||||
for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
|
||||
offset.reg = regs[ring_id];
|
||||
for (i = 0; i < 64; i++) {
|
||||
gen9_render_mocs.control_table[ring_id][i] =
|
||||
|
@ -310,8 +310,8 @@ static void switch_mmio(struct intel_vgpu *pre,
|
|||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
switch_mocs(pre, next, ring_id);
|
||||
|
||||
mmio = dev_priv->gvt->engine_mmio_list;
|
||||
while (i915_mmio_reg_offset((mmio++)->reg)) {
|
||||
for (mmio = dev_priv->gvt->engine_mmio_list;
|
||||
i915_mmio_reg_valid(mmio->reg); mmio++) {
|
||||
if (mmio->ring_id != ring_id)
|
||||
continue;
|
||||
// save
|
||||
|
|
|
@ -339,4 +339,21 @@ static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
|
|||
intel_gvt_host.mpt->put_vfio_device(vgpu);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
|
||||
* @vgpu: a vGPU
|
||||
* @gfn: guest PFN
|
||||
*
|
||||
* Returns:
|
||||
* true on valid gfn, false on not.
|
||||
*/
|
||||
static inline bool intel_gvt_hypervisor_is_valid_gfn(
|
||||
struct intel_vgpu *vgpu, unsigned long gfn)
|
||||
{
|
||||
if (!intel_gvt_host.mpt->is_valid_gfn)
|
||||
return true;
|
||||
|
||||
return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
|
||||
}
|
||||
|
||||
#endif /* _GVT_MPT_H_ */
|
||||
|
|
|
@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
|
|||
{
|
||||
|
||||
int i, ret = 0;
|
||||
unsigned long pfn;
|
||||
|
||||
gvt_dbg_core("emulate opregion from kernel\n");
|
||||
|
||||
switch (intel_gvt_host.hypervisor_type) {
|
||||
case INTEL_GVT_HYPERVISOR_KVM:
|
||||
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
|
||||
vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
|
||||
INTEL_GVT_OPREGION_SIZE,
|
||||
MEMREMAP_WB);
|
||||
if (!vgpu_opregion(vgpu)->va_gopregion) {
|
||||
gvt_vgpu_err("failed to map guest opregion\n");
|
||||
ret = -EFAULT;
|
||||
}
|
||||
vgpu_opregion(vgpu)->mapped = true;
|
||||
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
|
||||
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
|
||||
break;
|
||||
case INTEL_GVT_HYPERVISOR_XEN:
|
||||
/**
|
||||
|
@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
|
|||
if (vgpu_opregion(vgpu)->mapped)
|
||||
map_vgpu_opregion(vgpu, false);
|
||||
} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
|
||||
if (vgpu_opregion(vgpu)->mapped) {
|
||||
memunmap(vgpu_opregion(vgpu)->va_gopregion);
|
||||
vgpu_opregion(vgpu)->va_gopregion = NULL;
|
||||
}
|
||||
/* Guest opregion is released by VFIO */
|
||||
}
|
||||
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
|
||||
get_order(INTEL_GVT_OPREGION_SIZE));
|
||||
|
@ -480,19 +469,40 @@ static bool querying_capabilities(u32 scic)
|
|||
*/
|
||||
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
|
||||
{
|
||||
u32 *scic, *parm;
|
||||
u32 scic, parm;
|
||||
u32 func, subfunc;
|
||||
u64 scic_pa = 0, parm_pa = 0;
|
||||
int ret;
|
||||
|
||||
switch (intel_gvt_host.hypervisor_type) {
|
||||
case INTEL_GVT_HYPERVISOR_XEN:
|
||||
scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
|
||||
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
|
||||
scic = *((u32 *)vgpu_opregion(vgpu)->va +
|
||||
INTEL_GVT_OPREGION_SCIC);
|
||||
parm = *((u32 *)vgpu_opregion(vgpu)->va +
|
||||
INTEL_GVT_OPREGION_PARM);
|
||||
break;
|
||||
case INTEL_GVT_HYPERVISOR_KVM:
|
||||
scic = vgpu_opregion(vgpu)->va_gopregion +
|
||||
INTEL_GVT_OPREGION_SCIC;
|
||||
parm = vgpu_opregion(vgpu)->va_gopregion +
|
||||
INTEL_GVT_OPREGION_PARM;
|
||||
scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
|
||||
INTEL_GVT_OPREGION_SCIC;
|
||||
parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
|
||||
INTEL_GVT_OPREGION_PARM;
|
||||
|
||||
ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
|
||||
&scic, sizeof(scic));
|
||||
if (ret) {
|
||||
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
|
||||
ret, scic_pa, sizeof(scic));
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
|
||||
&parm, sizeof(parm));
|
||||
if (ret) {
|
||||
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
|
||||
ret, scic_pa, sizeof(scic));
|
||||
return ret;
|
||||
}
|
||||
|
||||
break;
|
||||
default:
|
||||
gvt_vgpu_err("not supported hypervisor\n");
|
||||
|
@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
|
|||
return 0;
|
||||
}
|
||||
|
||||
func = GVT_OPREGION_FUNC(*scic);
|
||||
subfunc = GVT_OPREGION_SUBFUNC(*scic);
|
||||
if (!querying_capabilities(*scic)) {
|
||||
func = GVT_OPREGION_FUNC(scic);
|
||||
subfunc = GVT_OPREGION_SUBFUNC(scic);
|
||||
if (!querying_capabilities(scic)) {
|
||||
gvt_vgpu_err("requesting runtime service: func \"%s\","
|
||||
" subfunc \"%s\"\n",
|
||||
opregion_func_name(func),
|
||||
|
@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
|
|||
* emulate exit status of function call, '0' means
|
||||
* "failure, generic, unsupported or unknown cause"
|
||||
*/
|
||||
*scic &= ~OPREGION_SCIC_EXIT_MASK;
|
||||
return 0;
|
||||
scic &= ~OPREGION_SCIC_EXIT_MASK;
|
||||
goto out;
|
||||
}
|
||||
|
||||
scic = 0;
|
||||
parm = 0;
|
||||
|
||||
out:
|
||||
switch (intel_gvt_host.hypervisor_type) {
|
||||
case INTEL_GVT_HYPERVISOR_XEN:
|
||||
*((u32 *)vgpu_opregion(vgpu)->va +
|
||||
INTEL_GVT_OPREGION_SCIC) = scic;
|
||||
*((u32 *)vgpu_opregion(vgpu)->va +
|
||||
INTEL_GVT_OPREGION_PARM) = parm;
|
||||
break;
|
||||
case INTEL_GVT_HYPERVISOR_KVM:
|
||||
ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
|
||||
&scic, sizeof(scic));
|
||||
if (ret) {
|
||||
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
|
||||
ret, scic_pa, sizeof(scic));
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
|
||||
&parm, sizeof(parm));
|
||||
if (ret) {
|
||||
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
|
||||
ret, scic_pa, sizeof(scic));
|
||||
return ret;
|
||||
}
|
||||
|
||||
break;
|
||||
default:
|
||||
gvt_vgpu_err("not supported hypervisor\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*scic = 0;
|
||||
*parm = 0;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
|
|||
struct vgpu_sched_data {
|
||||
struct list_head lru_list;
|
||||
struct intel_vgpu *vgpu;
|
||||
bool active;
|
||||
|
||||
ktime_t sched_in_time;
|
||||
ktime_t sched_out_time;
|
||||
|
@ -308,8 +309,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
|
|||
|
||||
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
|
||||
|
||||
kfree(vgpu->sched_data);
|
||||
vgpu->sched_data = NULL;
|
||||
|
||||
/* this vgpu id has been removed */
|
||||
if (idr_is_empty(&gvt->vgpu_idr))
|
||||
hrtimer_cancel(&sched_data->timer);
|
||||
}
|
||||
|
||||
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
|
||||
|
@ -325,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
|
|||
if (!hrtimer_active(&sched_data->timer))
|
||||
hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
|
||||
sched_data->period), HRTIMER_MODE_ABS);
|
||||
vgpu_data->active = true;
|
||||
}
|
||||
|
||||
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
|
||||
|
@ -332,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
|
|||
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
|
||||
|
||||
list_del_init(&vgpu_data->lru_list);
|
||||
vgpu_data->active = false;
|
||||
}
|
||||
|
||||
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
|
||||
|
@ -367,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
|
|||
|
||||
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
|
||||
{
|
||||
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
|
||||
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
|
||||
|
||||
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
|
||||
if (!vgpu_data->active) {
|
||||
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
|
||||
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
|
||||
}
|
||||
}
|
||||
|
||||
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
|
||||
|
@ -382,6 +395,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
|
|||
struct intel_gvt_workload_scheduler *scheduler =
|
||||
&vgpu->gvt->scheduler;
|
||||
int ring_id;
|
||||
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
|
||||
|
||||
if (!vgpu_data->active)
|
||||
return;
|
||||
|
||||
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
|
||||
|
||||
|
|
|
@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
|
|||
{
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
|
||||
intel_vgpu_select_submission_ops(vgpu, 0);
|
||||
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
|
||||
i915_gem_context_put(s->shadow_ctx);
|
||||
kmem_cache_destroy(s->workloads);
|
||||
}
|
||||
|
@ -1079,6 +1079,7 @@ out_shadow_ctx:
|
|||
*
|
||||
*/
|
||||
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
|
||||
unsigned long engine_mask,
|
||||
unsigned int interface)
|
||||
{
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
|
@ -1091,21 +1092,21 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
|
|||
if (WARN_ON(interface >= ARRAY_SIZE(ops)))
|
||||
return -EINVAL;
|
||||
|
||||
if (s->active) {
|
||||
s->ops->clean(vgpu);
|
||||
s->active = false;
|
||||
gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
|
||||
vgpu->id, s->ops->name);
|
||||
}
|
||||
if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
|
||||
return -EINVAL;
|
||||
|
||||
if (s->active)
|
||||
s->ops->clean(vgpu, engine_mask);
|
||||
|
||||
if (interface == 0) {
|
||||
s->ops = NULL;
|
||||
s->virtual_submission_interface = 0;
|
||||
gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
|
||||
s->active = false;
|
||||
gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = ops[interface]->init(vgpu);
|
||||
ret = ops[interface]->init(vgpu, engine_mask);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
|
|||
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
|
||||
|
||||
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
|
||||
unsigned long engine_mask,
|
||||
unsigned int interface);
|
||||
|
||||
extern const struct intel_vgpu_submission_ops
|
||||
|
|
|
@ -258,6 +258,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
|
|||
|
||||
intel_gvt_debugfs_remove_vgpu(vgpu);
|
||||
idr_remove(&gvt->vgpu_idr, vgpu->id);
|
||||
if (idr_is_empty(&gvt->vgpu_idr))
|
||||
intel_gvt_clean_irq(gvt);
|
||||
intel_vgpu_clean_sched_policy(vgpu);
|
||||
intel_vgpu_clean_submission(vgpu);
|
||||
intel_vgpu_clean_display(vgpu);
|
||||
|
@ -518,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
|
|||
intel_vgpu_reset_submission(vgpu, resetting_eng);
|
||||
/* full GPU reset or device model level reset */
|
||||
if (engine_mask == ALL_ENGINES || dmlr) {
|
||||
intel_vgpu_select_submission_ops(vgpu, 0);
|
||||
|
||||
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
|
||||
/*fence will not be reset during virtual reset */
|
||||
if (dmlr) {
|
||||
intel_vgpu_reset_gtt(vgpu);
|
||||
|
|
|
@ -1032,7 +1032,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
|
|||
const struct drm_i915_reg_table *table = engine->reg_tables;
|
||||
int count = engine->reg_table_count;
|
||||
|
||||
do {
|
||||
for (; count > 0; ++table, --count) {
|
||||
if (!table->master || is_master) {
|
||||
const struct drm_i915_reg_descriptor *reg;
|
||||
|
||||
|
@ -1040,7 +1040,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
|
|||
if (reg != NULL)
|
||||
return reg;
|
||||
}
|
||||
} while (table++, --count);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -1212,6 +1212,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
|
|||
continue;
|
||||
}
|
||||
|
||||
if (desc->bits[i].offset >= length) {
|
||||
DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
|
||||
*cmd, engine->name);
|
||||
return false;
|
||||
}
|
||||
|
||||
dword = cmd[desc->bits[i].offset] &
|
||||
desc->bits[i].mask;
|
||||
|
||||
|
|
|
@ -1842,6 +1842,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
|
|||
if (IS_GEN9_LP(dev_priv) ||
|
||||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
|
||||
intel_power_domains_init_hw(dev_priv, true);
|
||||
else
|
||||
intel_display_set_init_power(dev_priv, true);
|
||||
|
||||
i915_gem_sanitize(dev_priv);
|
||||
|
||||
|
|
|
@ -3717,7 +3717,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
|
|||
struct intel_display_error_state *error);
|
||||
|
||||
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
|
||||
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
|
||||
int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
|
||||
u32 val, int timeout_us);
|
||||
#define sandybridge_pcode_write(dev_priv, mbox, val) \
|
||||
sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
|
||||
|
||||
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
|
||||
u32 reply_mask, u32 reply, int timeout_base_ms);
|
||||
|
||||
|
|
|
@ -3323,16 +3323,15 @@ i915_gem_retire_work_handler(struct work_struct *work)
|
|||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
||||
/* Keep the retire handler running until we are finally idle.
|
||||
/*
|
||||
* Keep the retire handler running until we are finally idle.
|
||||
* We do not need to do this test under locking as in the worst-case
|
||||
* we queue the retire worker once too often.
|
||||
*/
|
||||
if (READ_ONCE(dev_priv->gt.awake)) {
|
||||
i915_queue_hangcheck(dev_priv);
|
||||
if (READ_ONCE(dev_priv->gt.awake))
|
||||
queue_delayed_work(dev_priv->wq,
|
||||
&dev_priv->gt.retire_work,
|
||||
round_jiffies_up_relative(HZ));
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool
|
||||
|
@ -5283,6 +5282,8 @@ err_unlock:
|
|||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
intel_uc_fini_wq(dev_priv);
|
||||
|
||||
if (ret != -EIO)
|
||||
i915_gem_cleanup_userptr(dev_priv);
|
||||
|
||||
|
|
|
@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
|
|||
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
|
||||
{
|
||||
struct pagevec *pvec = &vm->free_pages;
|
||||
struct pagevec stash;
|
||||
|
||||
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
|
||||
i915_gem_shrink_all(vm->i915);
|
||||
|
@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
|
|||
if (likely(pvec->nr))
|
||||
return pvec->pages[--pvec->nr];
|
||||
|
||||
/* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
|
||||
/*
|
||||
* Otherwise batch allocate pages to amoritize cost of set_pages_wc.
|
||||
*
|
||||
* We have to be careful as page allocation may trigger the shrinker
|
||||
* (via direct reclaim) which will fill up the WC stash underneath us.
|
||||
* So we add our WB pages into a temporary pvec on the stack and merge
|
||||
* them into the WC stash after all the allocations are complete.
|
||||
*/
|
||||
pagevec_init(&stash);
|
||||
do {
|
||||
struct page *page;
|
||||
|
||||
|
@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
|
|||
if (unlikely(!page))
|
||||
break;
|
||||
|
||||
pvec->pages[pvec->nr++] = page;
|
||||
} while (pagevec_space(pvec));
|
||||
stash.pages[stash.nr++] = page;
|
||||
} while (stash.nr < pagevec_space(pvec));
|
||||
|
||||
if (unlikely(!pvec->nr))
|
||||
return NULL;
|
||||
if (stash.nr) {
|
||||
int nr = min_t(int, stash.nr, pagevec_space(pvec));
|
||||
struct page **pages = stash.pages + stash.nr - nr;
|
||||
|
||||
set_pages_array_wc(pvec->pages, pvec->nr);
|
||||
if (nr && !set_pages_array_wc(pages, nr)) {
|
||||
memcpy(pvec->pages + pvec->nr,
|
||||
pages, sizeof(pages[0]) * nr);
|
||||
pvec->nr += nr;
|
||||
stash.nr -= nr;
|
||||
}
|
||||
|
||||
return pvec->pages[--pvec->nr];
|
||||
pagevec_release(&stash);
|
||||
}
|
||||
|
||||
return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
|
||||
}
|
||||
|
||||
static void vm_free_pages_release(struct i915_address_space *vm,
|
||||
|
@ -1341,15 +1359,18 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
|
|||
int count = gen8_pte_count(start, length);
|
||||
|
||||
if (pt == vm->scratch_pt) {
|
||||
pd->used_pdes++;
|
||||
|
||||
pt = alloc_pt(vm);
|
||||
if (IS_ERR(pt))
|
||||
if (IS_ERR(pt)) {
|
||||
pd->used_pdes--;
|
||||
goto unwind;
|
||||
}
|
||||
|
||||
if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
|
||||
gen8_initialize_pt(vm, pt);
|
||||
|
||||
gen8_ppgtt_set_pde(vm, pd, pt, pde);
|
||||
pd->used_pdes++;
|
||||
GEM_BUG_ON(pd->used_pdes > I915_PDES);
|
||||
}
|
||||
|
||||
|
@ -1373,13 +1394,16 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
|
|||
|
||||
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
|
||||
if (pd == vm->scratch_pd) {
|
||||
pdp->used_pdpes++;
|
||||
|
||||
pd = alloc_pd(vm);
|
||||
if (IS_ERR(pd))
|
||||
if (IS_ERR(pd)) {
|
||||
pdp->used_pdpes--;
|
||||
goto unwind;
|
||||
}
|
||||
|
||||
gen8_initialize_pd(vm, pd);
|
||||
gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
|
||||
pdp->used_pdpes++;
|
||||
GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
|
||||
|
||||
mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
|
||||
|
@ -2287,12 +2311,23 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
|
|||
u32 fault = I915_READ(GEN8_RING_FAULT_REG);
|
||||
|
||||
if (fault & RING_FAULT_VALID) {
|
||||
u32 fault_data0, fault_data1;
|
||||
u64 fault_addr;
|
||||
|
||||
fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
|
||||
fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
|
||||
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
|
||||
((u64)fault_data0 << 12);
|
||||
|
||||
DRM_DEBUG_DRIVER("Unexpected fault\n"
|
||||
"\tAddr: 0x%08lx\n"
|
||||
"\tAddr: 0x%08x_%08x\n"
|
||||
"\tAddress space: %s\n"
|
||||
"\tEngine ID: %d\n"
|
||||
"\tSource ID: %d\n"
|
||||
"\tType: %d\n",
|
||||
fault & PAGE_MASK,
|
||||
upper_32_bits(fault_addr),
|
||||
lower_32_bits(fault_addr),
|
||||
fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
|
||||
GEN8_RING_FAULT_ENGINE_ID(fault),
|
||||
RING_FAULT_SRCID(fault),
|
||||
RING_FAULT_FAULT_TYPE(fault));
|
||||
|
|
|
@ -276,6 +276,8 @@ static void mark_busy(struct drm_i915_private *i915)
|
|||
|
||||
intel_engines_unpark(i915);
|
||||
|
||||
i915_queue_hangcheck(i915);
|
||||
|
||||
queue_delayed_work(i915->wq,
|
||||
&i915->gt.retire_work,
|
||||
round_jiffies_up_relative(HZ));
|
||||
|
|
|
@ -363,13 +363,13 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
|
|||
I915_SHRINK_BOUND |
|
||||
I915_SHRINK_UNBOUND |
|
||||
I915_SHRINK_PURGEABLE);
|
||||
if (freed < sc->nr_to_scan)
|
||||
if (sc->nr_scanned < sc->nr_to_scan)
|
||||
freed += i915_gem_shrink(i915,
|
||||
sc->nr_to_scan - sc->nr_scanned,
|
||||
&sc->nr_scanned,
|
||||
I915_SHRINK_BOUND |
|
||||
I915_SHRINK_UNBOUND);
|
||||
if (freed < sc->nr_to_scan && current_is_kswapd()) {
|
||||
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
|
||||
intel_runtime_pm_get(i915);
|
||||
freed += i915_gem_shrink(i915,
|
||||
sc->nr_to_scan - sc->nr_scanned,
|
||||
|
|
|
@ -74,19 +74,19 @@
|
|||
GEN_DEFAULT_PAGE_SIZES, \
|
||||
CURSOR_OFFSETS
|
||||
|
||||
static const struct intel_device_info intel_i830_info __initconst = {
|
||||
static const struct intel_device_info intel_i830_info = {
|
||||
GEN2_FEATURES,
|
||||
.platform = INTEL_I830,
|
||||
.is_mobile = 1, .cursor_needs_physical = 1,
|
||||
.num_pipes = 2, /* legal, last one wins */
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i845g_info __initconst = {
|
||||
static const struct intel_device_info intel_i845g_info = {
|
||||
GEN2_FEATURES,
|
||||
.platform = INTEL_I845G,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i85x_info __initconst = {
|
||||
static const struct intel_device_info intel_i85x_info = {
|
||||
GEN2_FEATURES,
|
||||
.platform = INTEL_I85X, .is_mobile = 1,
|
||||
.num_pipes = 2, /* legal, last one wins */
|
||||
|
@ -94,7 +94,7 @@ static const struct intel_device_info intel_i85x_info __initconst = {
|
|||
.has_fbc = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i865g_info __initconst = {
|
||||
static const struct intel_device_info intel_i865g_info = {
|
||||
GEN2_FEATURES,
|
||||
.platform = INTEL_I865G,
|
||||
};
|
||||
|
@ -108,7 +108,7 @@ static const struct intel_device_info intel_i865g_info __initconst = {
|
|||
GEN_DEFAULT_PAGE_SIZES, \
|
||||
CURSOR_OFFSETS
|
||||
|
||||
static const struct intel_device_info intel_i915g_info __initconst = {
|
||||
static const struct intel_device_info intel_i915g_info = {
|
||||
GEN3_FEATURES,
|
||||
.platform = INTEL_I915G, .cursor_needs_physical = 1,
|
||||
.has_overlay = 1, .overlay_needs_physical = 1,
|
||||
|
@ -116,7 +116,7 @@ static const struct intel_device_info intel_i915g_info __initconst = {
|
|||
.unfenced_needs_alignment = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i915gm_info __initconst = {
|
||||
static const struct intel_device_info intel_i915gm_info = {
|
||||
GEN3_FEATURES,
|
||||
.platform = INTEL_I915GM,
|
||||
.is_mobile = 1,
|
||||
|
@ -128,7 +128,7 @@ static const struct intel_device_info intel_i915gm_info __initconst = {
|
|||
.unfenced_needs_alignment = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i945g_info __initconst = {
|
||||
static const struct intel_device_info intel_i945g_info = {
|
||||
GEN3_FEATURES,
|
||||
.platform = INTEL_I945G,
|
||||
.has_hotplug = 1, .cursor_needs_physical = 1,
|
||||
|
@ -137,7 +137,7 @@ static const struct intel_device_info intel_i945g_info __initconst = {
|
|||
.unfenced_needs_alignment = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i945gm_info __initconst = {
|
||||
static const struct intel_device_info intel_i945gm_info = {
|
||||
GEN3_FEATURES,
|
||||
.platform = INTEL_I945GM, .is_mobile = 1,
|
||||
.has_hotplug = 1, .cursor_needs_physical = 1,
|
||||
|
@ -148,14 +148,14 @@ static const struct intel_device_info intel_i945gm_info __initconst = {
|
|||
.unfenced_needs_alignment = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_g33_info __initconst = {
|
||||
static const struct intel_device_info intel_g33_info = {
|
||||
GEN3_FEATURES,
|
||||
.platform = INTEL_G33,
|
||||
.has_hotplug = 1,
|
||||
.has_overlay = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_pineview_info __initconst = {
|
||||
static const struct intel_device_info intel_pineview_info = {
|
||||
GEN3_FEATURES,
|
||||
.platform = INTEL_PINEVIEW, .is_mobile = 1,
|
||||
.has_hotplug = 1,
|
||||
|
@ -172,7 +172,7 @@ static const struct intel_device_info intel_pineview_info __initconst = {
|
|||
GEN_DEFAULT_PAGE_SIZES, \
|
||||
CURSOR_OFFSETS
|
||||
|
||||
static const struct intel_device_info intel_i965g_info __initconst = {
|
||||
static const struct intel_device_info intel_i965g_info = {
|
||||
GEN4_FEATURES,
|
||||
.platform = INTEL_I965G,
|
||||
.has_overlay = 1,
|
||||
|
@ -180,7 +180,7 @@ static const struct intel_device_info intel_i965g_info __initconst = {
|
|||
.has_snoop = false,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i965gm_info __initconst = {
|
||||
static const struct intel_device_info intel_i965gm_info = {
|
||||
GEN4_FEATURES,
|
||||
.platform = INTEL_I965GM,
|
||||
.is_mobile = 1, .has_fbc = 1,
|
||||
|
@ -190,13 +190,13 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
|
|||
.has_snoop = false,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_g45_info __initconst = {
|
||||
static const struct intel_device_info intel_g45_info = {
|
||||
GEN4_FEATURES,
|
||||
.platform = INTEL_G45,
|
||||
.ring_mask = RENDER_RING | BSD_RING,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_gm45_info __initconst = {
|
||||
static const struct intel_device_info intel_gm45_info = {
|
||||
GEN4_FEATURES,
|
||||
.platform = INTEL_GM45,
|
||||
.is_mobile = 1, .has_fbc = 1,
|
||||
|
@ -215,12 +215,12 @@ static const struct intel_device_info intel_gm45_info __initconst = {
|
|||
GEN_DEFAULT_PAGE_SIZES, \
|
||||
CURSOR_OFFSETS
|
||||
|
||||
static const struct intel_device_info intel_ironlake_d_info __initconst = {
|
||||
static const struct intel_device_info intel_ironlake_d_info = {
|
||||
GEN5_FEATURES,
|
||||
.platform = INTEL_IRONLAKE,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ironlake_m_info __initconst = {
|
||||
static const struct intel_device_info intel_ironlake_m_info = {
|
||||
GEN5_FEATURES,
|
||||
.platform = INTEL_IRONLAKE,
|
||||
.is_mobile = 1, .has_fbc = 1,
|
||||
|
@ -243,12 +243,12 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = {
|
|||
GEN6_FEATURES, \
|
||||
.platform = INTEL_SANDYBRIDGE
|
||||
|
||||
static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_sandybridge_d_gt1_info = {
|
||||
SNB_D_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_sandybridge_d_gt2_info = {
|
||||
SNB_D_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
@ -259,12 +259,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst =
|
|||
.is_mobile = 1
|
||||
|
||||
|
||||
static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_sandybridge_m_gt1_info = {
|
||||
SNB_M_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_sandybridge_m_gt2_info = {
|
||||
SNB_M_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
@ -288,12 +288,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst =
|
|||
.platform = INTEL_IVYBRIDGE, \
|
||||
.has_l3_dpf = 1
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_ivybridge_d_gt1_info = {
|
||||
IVB_D_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_ivybridge_d_gt2_info = {
|
||||
IVB_D_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
@ -304,17 +304,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
|
|||
.is_mobile = 1, \
|
||||
.has_l3_dpf = 1
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_ivybridge_m_gt1_info = {
|
||||
IVB_M_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_ivybridge_m_gt2_info = {
|
||||
IVB_M_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_q_info __initconst = {
|
||||
static const struct intel_device_info intel_ivybridge_q_info = {
|
||||
GEN7_FEATURES,
|
||||
.platform = INTEL_IVYBRIDGE,
|
||||
.gt = 2,
|
||||
|
@ -322,7 +322,7 @@ static const struct intel_device_info intel_ivybridge_q_info __initconst = {
|
|||
.has_l3_dpf = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_valleyview_info __initconst = {
|
||||
static const struct intel_device_info intel_valleyview_info = {
|
||||
.platform = INTEL_VALLEYVIEW,
|
||||
.gen = 7,
|
||||
.is_lp = 1,
|
||||
|
@ -358,17 +358,17 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
|
|||
.platform = INTEL_HASWELL, \
|
||||
.has_l3_dpf = 1
|
||||
|
||||
static const struct intel_device_info intel_haswell_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_haswell_gt1_info = {
|
||||
HSW_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_haswell_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_haswell_gt2_info = {
|
||||
HSW_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_haswell_gt3_info __initconst = {
|
||||
static const struct intel_device_info intel_haswell_gt3_info = {
|
||||
HSW_PLATFORM,
|
||||
.gt = 3,
|
||||
};
|
||||
|
@ -388,17 +388,17 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = {
|
|||
.gen = 8, \
|
||||
.platform = INTEL_BROADWELL
|
||||
|
||||
static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_broadwell_gt1_info = {
|
||||
BDW_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_broadwell_gt2_info = {
|
||||
BDW_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
|
||||
static const struct intel_device_info intel_broadwell_rsvd_info = {
|
||||
BDW_PLATFORM,
|
||||
.gt = 3,
|
||||
/* According to the device ID those devices are GT3, they were
|
||||
|
@ -406,13 +406,13 @@ static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
|
|||
*/
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
|
||||
static const struct intel_device_info intel_broadwell_gt3_info = {
|
||||
BDW_PLATFORM,
|
||||
.gt = 3,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_cherryview_info __initconst = {
|
||||
static const struct intel_device_info intel_cherryview_info = {
|
||||
.gen = 8, .num_pipes = 3,
|
||||
.has_hotplug = 1,
|
||||
.is_lp = 1,
|
||||
|
@ -455,12 +455,12 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
|
|||
.gen = 9, \
|
||||
.platform = INTEL_SKYLAKE
|
||||
|
||||
static const struct intel_device_info intel_skylake_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_skylake_gt1_info = {
|
||||
SKL_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_skylake_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_skylake_gt2_info = {
|
||||
SKL_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
@ -470,12 +470,12 @@ static const struct intel_device_info intel_skylake_gt2_info __initconst = {
|
|||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
|
||||
|
||||
|
||||
static const struct intel_device_info intel_skylake_gt3_info __initconst = {
|
||||
static const struct intel_device_info intel_skylake_gt3_info = {
|
||||
SKL_GT3_PLUS_PLATFORM,
|
||||
.gt = 3,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_skylake_gt4_info __initconst = {
|
||||
static const struct intel_device_info intel_skylake_gt4_info = {
|
||||
SKL_GT3_PLUS_PLATFORM,
|
||||
.gt = 4,
|
||||
};
|
||||
|
@ -511,13 +511,13 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
|
|||
IVB_CURSOR_OFFSETS, \
|
||||
BDW_COLORS
|
||||
|
||||
static const struct intel_device_info intel_broxton_info __initconst = {
|
||||
static const struct intel_device_info intel_broxton_info = {
|
||||
GEN9_LP_FEATURES,
|
||||
.platform = INTEL_BROXTON,
|
||||
.ddb_size = 512,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_geminilake_info __initconst = {
|
||||
static const struct intel_device_info intel_geminilake_info = {
|
||||
GEN9_LP_FEATURES,
|
||||
.platform = INTEL_GEMINILAKE,
|
||||
.ddb_size = 1024,
|
||||
|
@ -529,17 +529,17 @@ static const struct intel_device_info intel_geminilake_info __initconst = {
|
|||
.gen = 9, \
|
||||
.platform = INTEL_KABYLAKE
|
||||
|
||||
static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_kabylake_gt1_info = {
|
||||
KBL_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_kabylake_gt2_info = {
|
||||
KBL_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
|
||||
static const struct intel_device_info intel_kabylake_gt3_info = {
|
||||
KBL_PLATFORM,
|
||||
.gt = 3,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||
|
@ -550,17 +550,17 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
|
|||
.gen = 9, \
|
||||
.platform = INTEL_COFFEELAKE
|
||||
|
||||
static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
|
||||
static const struct intel_device_info intel_coffeelake_gt1_info = {
|
||||
CFL_PLATFORM,
|
||||
.gt = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_coffeelake_gt2_info = {
|
||||
CFL_PLATFORM,
|
||||
.gt = 2,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
|
||||
static const struct intel_device_info intel_coffeelake_gt3_info = {
|
||||
CFL_PLATFORM,
|
||||
.gt = 3,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
|
||||
|
@ -571,7 +571,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
|
|||
.ddb_size = 1024, \
|
||||
GLK_COLORS
|
||||
|
||||
static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
|
||||
static const struct intel_device_info intel_cannonlake_gt2_info = {
|
||||
GEN10_FEATURES,
|
||||
.is_alpha_support = 1,
|
||||
.platform = INTEL_CANNONLAKE,
|
||||
|
|
|
@ -2489,6 +2489,8 @@ enum i915_power_well_id {
|
|||
|
||||
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
|
||||
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
|
||||
#define FAULT_VA_HIGH_BITS (0xf << 0)
|
||||
#define FAULT_GTT_SEL (1 << 4)
|
||||
|
||||
#define FPGA_DBG _MMIO(0x42300)
|
||||
#define FPGA_DBG_RM_NOCLAIM (1<<31)
|
||||
|
|
|
@ -779,7 +779,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
|
|||
{
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
|
||||
if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
|
||||
return NULL;
|
||||
|
||||
/* MST */
|
||||
|
|
|
@@ -1107,6 +1107,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 }

 static const u8 cnp_ddc_pin_map[] = {
+	[0] = 0, /* N/A */
 	[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
 	[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
 	[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */

@@ -1115,9 +1116,14 @@ static const u8 cnp_ddc_pin_map[] = {

 static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 {
-	if (HAS_PCH_CNP(dev_priv) &&
-	    vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
-		return cnp_ddc_pin_map[vbt_pin];
+	if (HAS_PCH_CNP(dev_priv)) {
+		if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
+			return cnp_ddc_pin_map[vbt_pin];
+		} else {
+			DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
+			return 0;
+		}
+	}

 	return vbt_pin;
 }
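The hunk above is, at heart, a bounds check on an untrusted table index with a graceful fallback. As a quick standalone illustration of the same pattern (this is not the i915 code; the table values and names here are made up):

/* sketch: look up an untrusted index in a fixed table, fall back to 0 */
#include <stdio.h>

static const unsigned char pin_map[] = { 0, 1, 2, 4 }; /* illustrative values */

static unsigned char map_pin(unsigned char untrusted_pin)
{
	if (untrusted_pin < sizeof(pin_map) / sizeof(pin_map[0]))
		return pin_map[untrusted_pin];

	fprintf(stderr, "ignoring invalid pin %u\n", untrusted_pin);
	return 0; /* report "no pin" instead of indexing past the array */
}

int main(void)
{
	printf("%u %u\n", map_pin(2), map_pin(200));
	return 0;
}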
@@ -1323,11 +1329,13 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
 	} else if (bdb->version == 195) {
 		expected_size = 37;
-	} else if (bdb->version <= 197) {
+	} else if (bdb->version <= 215) {
 		expected_size = 38;
+	} else if (bdb->version <= 216) {
+		expected_size = 39;
 	} else {
-		expected_size = 38;
-		BUILD_BUG_ON(sizeof(*child) < 38);
+		expected_size = sizeof(*child);
+		BUILD_BUG_ON(sizeof(*child) < 39);
 		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
 				 bdb->version, expected_size);
 	}
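For readers following the VBT version logic above, a small standalone sketch of the version-to-expected-size selection; the numeric constants are illustrative stand-ins taken from the hunk, not the authoritative i915 definitions:

/* sketch: map a VBT version to the child-device config size we expect */
#include <stdio.h>

#define FULL_CHILD_SIZE 39 /* illustrative; stands in for sizeof(*child) */

static unsigned int expected_child_size(unsigned int version)
{
	if (version < 195)
		return 33;		/* legacy layout (assumed value) */
	else if (version == 195)
		return 37;
	else if (version <= 215)
		return 38;
	else if (version <= 216)
		return 39;
	/* unknown newer version: assume the full struct we know about */
	return FULL_CHILD_SIZE;
}

int main(void)
{
	printf("%u %u %u\n", expected_child_size(195),
	       expected_child_size(216), expected_child_size(230));
	return 0;
}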
@@ -149,17 +149,6 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
 		return;

 	mod_timer(&b->fake_irq, jiffies + 1);
-
-	/* Ensure that even if the GPU hangs, we get woken up.
-	 *
-	 * However, note that if no one is waiting, we never notice
-	 * a gpu hang. Eventually, we will have to wait for a resource
-	 * held by the GPU and so trigger a hangcheck. In the most
-	 * pathological case, this will be upon memory starvation! To
-	 * prevent this, we also queue the hangcheck from the retire
-	 * worker.
-	 */
-	i915_queue_hangcheck(engine->i915);
 }

 static void irq_enable(struct intel_engine_cs *engine)
@@ -1370,10 +1370,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		break;
 	}

-	/* Inform power controller of upcoming frequency change */
+	/*
+	 * Inform power controller of upcoming frequency change. BSpec
+	 * requires us to wait up to 150usec, but that leads to timeouts;
+	 * the 2ms used here is based on experiment.
+	 */
 	mutex_lock(&dev_priv->pcu_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      0x80000000);
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      0x80000000, 2000);
 	mutex_unlock(&dev_priv->pcu_lock);

 	if (ret) {

@@ -1404,8 +1409,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	I915_WRITE(CDCLK_CTL, val);

 	mutex_lock(&dev_priv->pcu_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      cdclk_state->voltage_level);
+	/*
+	 * The timeout isn't specified, the 2ms used here is based on
+	 * experiment.
+	 * FIXME: Waiting for the request completion could be delayed until
+	 * the next PCODE request based on BSpec.
+	 */
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      cdclk_state->voltage_level, 2000);
 	mutex_unlock(&dev_priv->pcu_lock);

 	if (ret) {
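The comments in the hunks above explain why the poll budget was raised to 2ms. As a rough standalone sketch of the underlying wait-with-timeout shape (the kernel uses its own helpers such as __intel_wait_for_register_fw; this only shows the general idea, with made-up names):

/* sketch: poll a readiness predicate until it holds or a microsecond budget expires */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static int wait_for_ready(bool (*ready)(void), long long timeout_us)
{
	long long deadline = now_us() + timeout_us;

	while (!ready()) {
		if (now_us() > deadline)
			return -1; /* the kernel would return -ETIMEDOUT here */
	}
	return 0;
}

static bool always_ready(void) { return true; }

int main(void)
{
	/* 2000us mirrors the experimentally chosen CDCLK budget in the hunk above */
	printf("%d\n", wait_for_ready(always_ready, 2000));
	return 0;
}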
@@ -5661,8 +5661,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
 	if (!crtc_state->base.active)
 		return 0;

-	mask = BIT(POWER_DOMAIN_PIPE(pipe));
-	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
+	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
 	if (crtc_state->pch_pfit.enabled ||
 	    crtc_state->pch_pfit.force_thru)
 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

@@ -5674,7 +5674,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
 	}

 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
-		mask |= BIT(POWER_DOMAIN_AUDIO);
+		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

 	if (crtc_state->shared_dpll)
 		mask |= BIT_ULL(POWER_DOMAIN_PLLS);
@@ -328,14 +328,22 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		return;

  failure_handling:
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
-		      intel_connector->base.base.id,
-		      intel_connector->base.name,
-		      intel_dp->link_rate, intel_dp->lane_count);
-	if (!intel_dp_get_link_train_fallback_values(intel_dp,
-						     intel_dp->link_rate,
-						     intel_dp->lane_count))
-		/* Schedule a Hotplug Uevent to userspace to start modeset */
-		schedule_work(&intel_connector->modeset_retry_work);
+	/* Dont fallback and prune modes if its eDP */
+	if (!intel_dp_is_edp(intel_dp)) {
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+			      intel_connector->base.base.id,
+			      intel_connector->base.name,
+			      intel_dp->link_rate, intel_dp->lane_count);
+		if (!intel_dp_get_link_train_fallback_values(intel_dp,
+							     intel_dp->link_rate,
+							     intel_dp->lane_count))
+			/* Schedule a Hotplug Uevent to userspace to start modeset */
+			schedule_work(&intel_connector->modeset_retry_work);
+	} else {
+		DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+			  intel_connector->base.base.id,
+			  intel_connector->base.name,
+			  intel_dp->link_rate, intel_dp->lane_count);
+	}
 	return;
 }
@@ -1951,8 +1951,22 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->stats.lock, flags);
 	if (engine->stats.enabled == ~0)
 		goto busy;
-	if (engine->stats.enabled++ == 0)
+	if (engine->stats.enabled++ == 0) {
+		struct intel_engine_execlists *execlists = &engine->execlists;
+		const struct execlist_port *port = execlists->port;
+		unsigned int num_ports = execlists_num_ports(execlists);
+
 		engine->stats.enabled_at = ktime_get();
+
+		/* XXX submission method oblivious? */
+		while (num_ports-- && port_isset(port)) {
+			engine->stats.active++;
+			port++;
+		}
+
+		if (engine->stats.active)
+			engine->stats.start = engine->stats.enabled_at;
+	}
 	spin_unlock_irqrestore(&engine->stats.lock, flags);

 	return 0;
@@ -39,9 +39,6 @@
 #define KBL_FW_MAJOR 9
 #define KBL_FW_MINOR 39

-#define GLK_FW_MAJOR 10
-#define GLK_FW_MINOR 56
-
 #define GUC_FW_PATH(platform, major, minor) \
 	"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"

@@ -54,8 +51,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
 #define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
 MODULE_FIRMWARE(I915_KBL_GUC_UCODE);

-#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-
 static void guc_fw_select(struct intel_uc_fw *guc_fw)
 {
 	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);

@@ -82,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
 		guc_fw->path = I915_KBL_GUC_UCODE;
 		guc_fw->major_ver_wanted = KBL_FW_MAJOR;
 		guc_fw->minor_ver_wanted = KBL_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		guc_fw->path = I915_GLK_GUC_UCODE;
-		guc_fw->major_ver_wanted = GLK_FW_MAJOR;
-		guc_fw->minor_ver_wanted = GLK_FW_MINOR;
 	} else {
 		DRM_WARN("%s: No firmware known for this platform!\n",
 			 intel_uc_fw_type_repr(guc_fw->type));
@@ -411,7 +411,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	unsigned int hung = 0, stuck = 0;
-	int busy_count = 0;

 	if (!i915_modparams.enable_hangcheck)
 		return;

@@ -429,7 +428,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

 	for_each_engine(engine, dev_priv, id) {
-		const bool busy = intel_engine_has_waiter(engine);
 		struct intel_engine_hangcheck hc;

 		semaphore_clear_deadlocks(dev_priv);

@@ -443,16 +441,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		if (hc.action != ENGINE_DEAD)
 			stuck |= intel_engine_flag(engine);
 		}
-
-		busy_count += busy;
 	}

 	if (hung)
 		hangcheck_declare_hang(dev_priv, hung, stuck);

-	/* Reset timer in case GPU hangs without another request being added */
-	if (busy_count)
-		i915_queue_hangcheck(dev_priv);
+	i915_queue_hangcheck(dev_priv);
 }

 void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
@@ -1595,12 +1595,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct edid *edid;
 	bool connected = false;
+	struct i2c_adapter *i2c;

 	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);

-	edid = drm_get_edid(connector,
-			    intel_gmbus_get_adapter(dev_priv,
-						    intel_hdmi->ddc_bus));
+	i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+	edid = drm_get_edid(connector, i2c);
+
+	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+		DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+		intel_gmbus_force_bit(i2c, true);
+		edid = drm_get_edid(connector, i2c);
+		intel_gmbus_force_bit(i2c, false);
+	}

 	intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);

@@ -54,10 +54,6 @@
 #define KBL_HUC_FW_MINOR 00
 #define KBL_BLD_NUM 1810

-#define GLK_HUC_FW_MAJOR 02
-#define GLK_HUC_FW_MINOR 00
-#define GLK_BLD_NUM 1748
-
 #define HUC_FW_PATH(platform, major, minor, bld_num) \
 	"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
 	__stringify(minor) "_" __stringify(bld_num) ".bin"

@@ -74,9 +70,6 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
 				  KBL_HUC_FW_MINOR, KBL_BLD_NUM)
 MODULE_FIRMWARE(I915_KBL_HUC_UCODE);

-#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
-				       GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-
 static void huc_fw_select(struct intel_uc_fw *huc_fw)
 {
 	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);

@@ -103,10 +96,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
 		huc_fw->path = I915_KBL_HUC_UCODE;
 		huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
 		huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		huc_fw->path = I915_GLK_HUC_UCODE;
-		huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
-		huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
 	} else {
 		DRM_WARN("%s: No firmware known for this platform!\n",
 			 intel_uc_fw_type_repr(huc_fw->type));
@@ -9149,8 +9149,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 	return 0;
 }

-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-			    u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+				    u32 mbox, u32 val, int timeout_us)
 {
 	int status;

@@ -9173,7 +9173,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,

 	if (__intel_wait_for_register_fw(dev_priv,
 					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-					 500, 0, NULL)) {
+					 timeout_us, 0, NULL)) {
 		DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
 			  val, mbox, __builtin_return_address(0));
 		return -ETIMEDOUT;
@@ -209,8 +209,6 @@ void intel_uc_fini_wq(struct drm_i915_private *dev_priv)
 	if (!USES_GUC(dev_priv))
 		return;

-	GEM_BUG_ON(!HAS_GUC(dev_priv));
-
 	intel_guc_fini_wq(&dev_priv->guc);
 }

@@ -412,6 +412,8 @@ struct child_device_config {
 	u16 dp_gpio_pin_num; /* 195 */
 	u8 dp_iboost_level:4; /* 196 */
 	u8 hdmi_iboost_level:4; /* 196 */
+	u8 dp_max_link_rate:2; /* 216 CNL+ */
+	u8 dp_max_link_rate_reserved:6; /* 216 */
 } __packed;

 struct bdb_general_definitions {
@@ -75,6 +75,7 @@ int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
@@ -60,6 +60,7 @@ int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
 int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp108_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);

 #endif
@@ -46,6 +46,16 @@ enum nvkm_therm_attr_type {
 	NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
 };

+struct nvkm_therm_clkgate_init {
+	u32 addr;
+	u8 count;
+	u32 data;
+};
+
+struct nvkm_therm_clkgate_pack {
+	const struct nvkm_therm_clkgate_init *init;
+};
+
 struct nvkm_therm {
 	const struct nvkm_therm_func *func;
 	struct nvkm_subdev subdev;

@@ -85,17 +95,24 @@

 	int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
 	int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
+
+	bool clkgating_enabled;
 };

 int nvkm_therm_temp_get(struct nvkm_therm *);
 int nvkm_therm_fan_sense(struct nvkm_therm *);
 int nvkm_therm_cstate(struct nvkm_therm *, int, int);
+void nvkm_therm_clkgate_init(struct nvkm_therm *,
+			     const struct nvkm_therm_clkgate_pack *);
+void nvkm_therm_clkgate_enable(struct nvkm_therm *);
+void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool);

 int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
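A short standalone sketch of how a clockgating pack like the one declared above is plausibly consumed, assuming the pack is a zero-terminated array of init tables and each entry programs `count` consecutive 32-bit registers starting at `addr`; this mirrors the data layout shown in the diff only, not nouveau's actual register write path:

/* sketch: walk a zero-terminated pack of clockgating init tables */
#include <stdint.h>
#include <stdio.h>

struct clkgate_init { uint32_t addr; uint8_t count; uint32_t data; };
struct clkgate_pack { const struct clkgate_init *init; };

static void wr32(uint32_t addr, uint32_t data)
{
	/* stand-in for an MMIO write */
	printf("wr32(0x%06x, 0x%08x)\n", (unsigned)addr, (unsigned)data);
}

static void clkgate_apply(const struct clkgate_pack *pack)
{
	for (; pack->init; pack++)
		for (const struct clkgate_init *i = pack->init; i->count; i++)
			for (uint8_t n = 0; n < i->count; n++)
				wr32(i->addr + n * 4, i->data);
}

static const struct clkgate_init demo_tbl[] = {
	{ 0x4041f0, 1, 0x00004046 },
	{}
};
static const struct clkgate_pack demo_pack[] = { { demo_tbl }, {} };

int main(void)
{
	clkgate_apply(demo_pack);
	return 0;
}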
@@ -105,4 +105,32 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
 	return ioptr;
 }

+static inline void
+nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
+{
+	if (*pnvbo) {
+		nouveau_bo_unmap(*pnvbo);
+		nouveau_bo_unpin(*pnvbo);
+		nouveau_bo_ref(NULL, pnvbo);
+	}
+}
+
+static inline int
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+		       struct nouveau_bo **pnvbo)
+{
+	int ret = nouveau_bo_new(cli, size, align, flags,
+				 0, 0, NULL, NULL, pnvbo);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(*pnvbo, flags, true);
+		if (ret == 0) {
+			ret = nouveau_bo_map(*pnvbo);
+			if (ret == 0)
+				return ret;
+			nouveau_bo_unpin(*pnvbo);
+		}
+		nouveau_bo_ref(NULL, pnvbo);
+	}
+	return ret;
+}
 #endif
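The two inline helpers above fold the new/pin/map sequence and its mirror-image teardown into single calls, so every call site unwinds the same way. A standalone sketch of the same acquire/release pattern, with purely illustrative names rather than nouveau API:

/* sketch: paired acquire and release helpers with reverse-order unwinding */
#include <stdio.h>
#include <stdlib.h>

struct buf { void *mem; };

static int buf_new_pin_map(size_t size, struct buf **pbuf)
{
	struct buf *b = calloc(1, sizeof(*b));
	if (!b)
		return -1;
	b->mem = malloc(size);
	if (!b->mem) {		/* unwind in reverse order on failure */
		free(b);
		return -1;
	}
	*pbuf = b;
	return 0;
}

static void buf_unmap_unpin_unref(struct buf **pbuf)
{
	if (*pbuf) {
		free((*pbuf)->mem);
		free(*pbuf);
		*pbuf = NULL;	/* clear the caller's pointer, like nouveau_bo_ref(NULL, ...) */
	}
}

int main(void)
{
	struct buf *b = NULL;
	if (buf_new_pin_map(4096, &b) == 0)
		puts("acquired");
	buf_unmap_unpin_unref(&b);
	return 0;
}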
@@ -60,7 +60,6 @@ struct nouveau_crtc {
 	} cursor;

 	struct {
-		struct nouveau_bo *nvbo;
 		int depth;
 	} lut;

@@ -56,6 +56,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
 int nouveau_nofbaccel = 0;
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);

+MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
+static int nouveau_fbcon_bpp;
+module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
+
 static void
 nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {

@@ -488,7 +492,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_fbdev *fbcon;
-	int preferred_bpp;
+	int preferred_bpp = nouveau_fbcon_bpp;
 	int ret;

 	if (!dev->mode_config.num_crtc ||

@@ -512,13 +516,15 @@ nouveau_fbcon_init(struct drm_device *dev)
 	if (ret)
 		goto fini;

-	if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
-		preferred_bpp = 8;
-	else
-	if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
-		preferred_bpp = 16;
-	else
-		preferred_bpp = 32;
+	if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
+		if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+			preferred_bpp = 8;
+		else
+		if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
+			preferred_bpp = 16;
+		else
+			preferred_bpp = 32;
+	}

 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	if (!drm_drv_uses_atomic_modeset(dev))
@@ -137,8 +137,10 @@ struct nv50_head_atom {
 	} mode;

 	struct {
+		bool visible;
 		u32 handle;
 		u64 offset:40;
+		u8 mode:4;
 	} lut;

 	struct {
@@ -192,6 +194,7 @@ struct nv50_head_atom {

 	union {
 		struct {
+			bool ilut:1;
 			bool core:1;
 			bool curs:1;
 		};
@@ -200,6 +203,7 @@ struct nv50_head_atom {

 	union {
 		struct {
+			bool ilut:1;
 			bool core:1;
 			bool curs:1;
 			bool view:1;
@@ -660,6 +664,10 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,

 struct nv50_head {
 	struct nouveau_crtc base;
+	struct {
+		struct nouveau_bo *nvbo[2];
+		int next;
+	} lut;
 	struct nv50_ovly ovly;
 	struct nv50_oimm oimm;
 };
@ -1794,6 +1802,54 @@ nv50_head_lut_clr(struct nv50_head *head)
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_head_lut_load(struct drm_property_blob *blob, int mode,
|
||||
struct nouveau_bo *nvbo)
|
||||
{
|
||||
struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
|
||||
void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
|
||||
const int size = blob->length / sizeof(*in);
|
||||
int bits, shift, i;
|
||||
u16 zero, r, g, b;
|
||||
|
||||
/* This can't happen.. But it shuts the compiler up. */
|
||||
if (WARN_ON(size != 256))
|
||||
return;
|
||||
|
||||
switch (mode) {
|
||||
case 0: /* LORES. */
|
||||
case 1: /* HIRES. */
|
||||
bits = 11;
|
||||
shift = 3;
|
||||
zero = 0x0000;
|
||||
break;
|
||||
case 7: /* INTERPOLATE_257_UNITY_RANGE. */
|
||||
bits = 14;
|
||||
shift = 0;
|
||||
zero = 0x6000;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < size; i++) {
|
||||
r = (drm_color_lut_extract(in[i]. red, bits) + zero) << shift;
|
||||
g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
|
||||
b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
|
||||
writew(r, lut + (i * 0x08) + 0);
|
||||
writew(g, lut + (i * 0x08) + 2);
|
||||
writew(b, lut + (i * 0x08) + 4);
|
||||
}
|
||||
|
||||
/* INTERPOLATE modes require a "next" entry to interpolate with,
|
||||
* so we replicate the last entry to deal with this for now.
|
||||
*/
|
||||
writew(r, lut + (i * 0x08) + 0);
|
||||
writew(g, lut + (i * 0x08) + 2);
|
||||
writew(b, lut + (i * 0x08) + 4);
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
|
||||
{
|
||||
|
@ -1802,18 +1858,18 @@ nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
|
|||
if ((push = evo_wait(core, 7))) {
|
||||
if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
|
||||
evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
|
||||
evo_data(push, 0xc0000000);
|
||||
evo_data(push, 0x80000000 | asyh->lut.mode << 30);
|
||||
evo_data(push, asyh->lut.offset >> 8);
|
||||
} else
|
||||
if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
|
||||
evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
|
||||
evo_data(push, 0xc0000000);
|
||||
evo_data(push, 0x80000000 | asyh->lut.mode << 30);
|
||||
evo_data(push, asyh->lut.offset >> 8);
|
||||
evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
|
||||
evo_data(push, asyh->lut.handle);
|
||||
} else {
|
||||
evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
|
||||
evo_data(push, 0x83000000);
|
||||
evo_data(push, 0x80000000 | asyh->lut.mode << 24);
|
||||
evo_data(push, asyh->lut.offset >> 8);
|
||||
evo_data(push, 0x00000000);
|
||||
evo_data(push, 0x00000000);
|
||||
|
@ -1896,7 +1952,7 @@ nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
|
|||
static void
|
||||
nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
|
||||
{
|
||||
if (asyh->clr.core && (!asyh->set.core || y))
|
||||
if (asyh->clr.ilut && (!asyh->set.ilut || y))
|
||||
nv50_head_lut_clr(head);
|
||||
if (asyh->clr.core && (!asyh->set.core || y))
|
||||
nv50_head_core_clr(head);
|
||||
|
@ -1909,7 +1965,15 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
|
|||
{
|
||||
if (asyh->set.view ) nv50_head_view (head, asyh);
|
||||
if (asyh->set.mode ) nv50_head_mode (head, asyh);
|
||||
if (asyh->set.core ) nv50_head_lut_set (head, asyh);
|
||||
if (asyh->set.ilut ) {
|
||||
struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
|
||||
struct drm_property_blob *blob = asyh->state.gamma_lut;
|
||||
if (blob)
|
||||
nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
|
||||
asyh->lut.offset = nvbo->bo.offset;
|
||||
head->lut.next ^= 1;
|
||||
nv50_head_lut_set(head, asyh);
|
||||
}
|
||||
if (asyh->set.core ) nv50_head_core_set(head, asyh);
|
||||
if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
|
||||
if (asyh->set.base ) nv50_head_base (head, asyh);
|
||||
|
@ -2043,6 +2107,37 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
|
|||
asyh->set.view = true;
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_head_atomic_check_lut(struct nv50_head *head,
|
||||
struct nv50_head_atom *armh,
|
||||
struct nv50_head_atom *asyh)
|
||||
{
|
||||
struct nv50_disp *disp = nv50_disp(head->base.base.dev);
|
||||
|
||||
/* An I8 surface without an input LUT makes no sense, and
|
||||
* EVO will throw an error if you try.
|
||||
*
|
||||
* Legacy clients actually cause this due to the order in
|
||||
* which they call ioctls, so we will enable the LUT with
|
||||
* whatever contents the buffer already contains to avoid
|
||||
* triggering the error check.
|
||||
*/
|
||||
if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
|
||||
asyh->lut.handle = 0;
|
||||
asyh->clr.ilut = armh->lut.visible;
|
||||
return;
|
||||
}
|
||||
|
||||
if (disp->disp->oclass < GF110_DISP) {
|
||||
asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
|
||||
asyh->set.ilut = true;
|
||||
} else {
|
||||
asyh->lut.mode = 7;
|
||||
asyh->set.ilut = asyh->state.color_mgmt_changed;
|
||||
}
|
||||
asyh->lut.handle = disp->mast.base.vram.handle;
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
|
||||
{
|
||||
|
@ -2128,6 +2223,11 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
|
|||
if (asyh->state.mode_changed)
|
||||
nv50_head_atomic_check_mode(head, asyh);
|
||||
|
||||
if (asyh->state.color_mgmt_changed ||
|
||||
asyh->base.cpp != armh->base.cpp)
|
||||
nv50_head_atomic_check_lut(head, armh, asyh);
|
||||
asyh->lut.visible = asyh->lut.handle != 0;
|
||||
|
||||
if (asyc) {
|
||||
if (asyc->set.scaler)
|
||||
nv50_head_atomic_check_view(armh, asyh, asyc);
|
||||
|
@ -2143,7 +2243,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
|
|||
asyh->core.w = asyh->base.w;
|
||||
asyh->core.h = asyh->base.h;
|
||||
} else
|
||||
if ((asyh->core.visible = asyh->curs.visible)) {
|
||||
if ((asyh->core.visible = asyh->curs.visible) ||
|
||||
(asyh->core.visible = asyh->lut.visible)) {
|
||||
/*XXX: We need to either find some way of having the
|
||||
* primary base layer appear black, while still
|
||||
* being able to display the other layers, or we
|
||||
|
@ -2161,11 +2262,10 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
|
|||
asyh->core.layout = 1;
|
||||
asyh->core.block = 0;
|
||||
asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
|
||||
asyh->lut.handle = disp->mast.base.vram.handle;
|
||||
asyh->lut.offset = head->base.lut.nvbo->bo.offset;
|
||||
asyh->set.base = armh->base.cpp != asyh->base.cpp;
|
||||
asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
|
||||
} else {
|
||||
asyh->lut.visible = false;
|
||||
asyh->core.visible = false;
|
||||
asyh->curs.visible = false;
|
||||
asyh->base.cpp = 0;
|
||||
|
@ -2189,8 +2289,10 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
|
|||
asyh->clr.curs = true;
|
||||
}
|
||||
} else {
|
||||
asyh->clr.ilut = armh->lut.visible;
|
||||
asyh->clr.core = armh->core.visible;
|
||||
asyh->clr.curs = armh->curs.visible;
|
||||
asyh->set.ilut = asyh->lut.visible;
|
||||
asyh->set.core = asyh->core.visible;
|
||||
asyh->set.curs = asyh->curs.visible;
|
||||
}
|
||||
|
@ -2200,47 +2302,11 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_head_lut_load(struct drm_crtc *crtc)
|
||||
{
|
||||
struct nv50_disp *disp = nv50_disp(crtc->dev);
|
||||
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
|
||||
void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
|
||||
u16 *r, *g, *b;
|
||||
int i;
|
||||
|
||||
r = crtc->gamma_store;
|
||||
g = r + crtc->gamma_size;
|
||||
b = g + crtc->gamma_size;
|
||||
|
||||
for (i = 0; i < 256; i++) {
|
||||
if (disp->disp->oclass < GF110_DISP) {
|
||||
writew((*r++ >> 2) + 0x0000, lut + (i * 0x08) + 0);
|
||||
writew((*g++ >> 2) + 0x0000, lut + (i * 0x08) + 2);
|
||||
writew((*b++ >> 2) + 0x0000, lut + (i * 0x08) + 4);
|
||||
} else {
|
||||
/* 0x6000 interferes with the 14-bit color??? */
|
||||
writew((*r++ >> 2) + 0x6000, lut + (i * 0x20) + 0);
|
||||
writew((*g++ >> 2) + 0x6000, lut + (i * 0x20) + 2);
|
||||
writew((*b++ >> 2) + 0x6000, lut + (i * 0x20) + 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static const struct drm_crtc_helper_funcs
|
||||
nv50_head_help = {
|
||||
.atomic_check = nv50_head_atomic_check,
|
||||
};
|
||||
|
||||
static int
|
||||
nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
|
||||
uint32_t size,
|
||||
struct drm_modeset_acquire_ctx *ctx)
|
||||
{
|
||||
nv50_head_lut_load(crtc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *state)
|
||||
|
@ -2296,17 +2362,15 @@ nv50_head_reset(struct drm_crtc *crtc)
|
|||
static void
|
||||
nv50_head_destroy(struct drm_crtc *crtc)
|
||||
{
|
||||
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
|
||||
struct nv50_disp *disp = nv50_disp(crtc->dev);
|
||||
struct nv50_head *head = nv50_head(crtc);
|
||||
int i;
|
||||
|
||||
nv50_dmac_destroy(&head->ovly.base, disp->disp);
|
||||
nv50_pioc_destroy(&head->oimm.base);
|
||||
|
||||
nouveau_bo_unmap(nv_crtc->lut.nvbo);
|
||||
if (nv_crtc->lut.nvbo)
|
||||
nouveau_bo_unpin(nv_crtc->lut.nvbo);
|
||||
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
|
||||
for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
|
||||
nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
|
||||
|
||||
drm_crtc_cleanup(crtc);
|
||||
kfree(crtc);
|
||||
|
@ -2315,7 +2379,7 @@ nv50_head_destroy(struct drm_crtc *crtc)
|
|||
static const struct drm_crtc_funcs
|
||||
nv50_head_func = {
|
||||
.reset = nv50_head_reset,
|
||||
.gamma_set = nv50_head_gamma_set,
|
||||
.gamma_set = drm_atomic_helper_legacy_gamma_set,
|
||||
.destroy = nv50_head_destroy,
|
||||
.set_config = drm_atomic_helper_set_config,
|
||||
.page_flip = drm_atomic_helper_page_flip,
|
||||
|
@ -2333,7 +2397,7 @@ nv50_head_create(struct drm_device *dev, int index)
|
|||
struct nv50_base *base;
|
||||
struct nv50_curs *curs;
|
||||
struct drm_crtc *crtc;
|
||||
int ret;
|
||||
int ret, i;
|
||||
|
||||
head = kzalloc(sizeof(*head), GFP_KERNEL);
|
||||
if (!head)
|
||||
|
@ -2355,22 +2419,14 @@ nv50_head_create(struct drm_device *dev, int index)
|
|||
drm_crtc_helper_add(crtc, &nv50_head_help);
|
||||
drm_mode_crtc_set_gamma_size(crtc, 256);
|
||||
|
||||
ret = nouveau_bo_new(&drm->client, 8192, 0x100, TTM_PL_FLAG_VRAM,
|
||||
0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
|
||||
if (!ret) {
|
||||
ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
|
||||
if (!ret) {
|
||||
ret = nouveau_bo_map(head->base.lut.nvbo);
|
||||
if (ret)
|
||||
nouveau_bo_unpin(head->base.lut.nvbo);
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
|
||||
ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
|
||||
TTM_PL_FLAG_VRAM,
|
||||
&head->lut.nvbo[i]);
|
||||
if (ret)
|
||||
nouveau_bo_ref(NULL, &head->base.lut.nvbo);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* allocate overlay resources */
|
||||
ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
|
||||
if (ret)
|
||||
|
@ -4350,7 +4406,6 @@ nv50_display_init(struct drm_device *dev)
|
|||
{
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_plane *plane;
|
||||
struct drm_crtc *crtc;
|
||||
u32 *push;
|
||||
|
||||
push = evo_wait(nv50_mast(dev), 32);
|
||||
|
@ -4369,10 +4424,6 @@ nv50_display_init(struct drm_device *dev)
|
|||
}
|
||||
}
|
||||
|
||||
drm_for_each_crtc(crtc, dev) {
|
||||
nv50_head_lut_load(crtc);
|
||||
}
|
||||
|
||||
drm_for_each_plane(plane, dev) {
|
||||
struct nv50_wndw *wndw = nv50_wndw(plane);
|
||||
if (plane->funcs != &nv50_wndw)
|
||||
|
|
|
@@ -28,6 +28,7 @@
 #include <core/option.h>

 #include <subdev/bios.h>
+#include <subdev/therm.h>

 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
@ -1682,7 +1683,7 @@ nve4_chipset = {
|
|||
.mxm = nv50_mxm_new,
|
||||
.pci = gk104_pci_new,
|
||||
.pmu = gk104_pmu_new,
|
||||
.therm = gf119_therm_new,
|
||||
.therm = gk104_therm_new,
|
||||
.timer = nv41_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.volt = gk104_volt_new,
|
||||
|
@ -1721,7 +1722,7 @@ nve6_chipset = {
|
|||
.mxm = nv50_mxm_new,
|
||||
.pci = gk104_pci_new,
|
||||
.pmu = gk104_pmu_new,
|
||||
.therm = gf119_therm_new,
|
||||
.therm = gk104_therm_new,
|
||||
.timer = nv41_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.volt = gk104_volt_new,
|
||||
|
@ -1760,7 +1761,7 @@ nve7_chipset = {
|
|||
.mxm = nv50_mxm_new,
|
||||
.pci = gk104_pci_new,
|
||||
.pmu = gk104_pmu_new,
|
||||
.therm = gf119_therm_new,
|
||||
.therm = gk104_therm_new,
|
||||
.timer = nv41_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.volt = gk104_volt_new,
|
||||
|
@ -1811,7 +1812,7 @@ nvf0_chipset = {
|
|||
.bus = gf100_bus_new,
|
||||
.clk = gk104_clk_new,
|
||||
.devinit = gf100_devinit_new,
|
||||
.fb = gk104_fb_new,
|
||||
.fb = gk110_fb_new,
|
||||
.fuse = gf100_fuse_new,
|
||||
.gpio = gk104_gpio_new,
|
||||
.i2c = gk104_i2c_new,
|
||||
|
@ -1824,7 +1825,7 @@ nvf0_chipset = {
|
|||
.mxm = nv50_mxm_new,
|
||||
.pci = gk104_pci_new,
|
||||
.pmu = gk110_pmu_new,
|
||||
.therm = gf119_therm_new,
|
||||
.therm = gk104_therm_new,
|
||||
.timer = nv41_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.volt = gk104_volt_new,
|
||||
|
@ -1849,7 +1850,7 @@ nvf1_chipset = {
|
|||
.bus = gf100_bus_new,
|
||||
.clk = gk104_clk_new,
|
||||
.devinit = gf100_devinit_new,
|
||||
.fb = gk104_fb_new,
|
||||
.fb = gk110_fb_new,
|
||||
.fuse = gf100_fuse_new,
|
||||
.gpio = gk104_gpio_new,
|
||||
.i2c = gk104_i2c_new,
|
||||
|
@ -1862,7 +1863,7 @@ nvf1_chipset = {
|
|||
.mxm = nv50_mxm_new,
|
||||
.pci = gk104_pci_new,
|
||||
.pmu = gk110_pmu_new,
|
||||
.therm = gf119_therm_new,
|
||||
.therm = gk104_therm_new,
|
||||
.timer = nv41_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.volt = gk104_volt_new,
|
||||
|
@ -1887,7 +1888,7 @@ nv106_chipset = {
|
|||
.bus = gf100_bus_new,
|
||||
.clk = gk104_clk_new,
|
||||
.devinit = gf100_devinit_new,
|
||||
.fb = gk104_fb_new,
|
||||
.fb = gk110_fb_new,
|
||||
.fuse = gf100_fuse_new,
|
||||
.gpio = gk104_gpio_new,
|
||||
.i2c = gk104_i2c_new,
|
||||
|
@ -1900,7 +1901,7 @@ nv106_chipset = {
|
|||
.mxm = nv50_mxm_new,
|
||||
.pci = gk104_pci_new,
|
||||
.pmu = gk208_pmu_new,
|
||||
.therm = gf119_therm_new,
|
||||
.therm = gk104_therm_new,
|
||||
.timer = nv41_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.volt = gk104_volt_new,
|
||||
|
@ -1925,7 +1926,7 @@ nv108_chipset = {
|
|||
.bus = gf100_bus_new,
|
||||
.clk = gk104_clk_new,
|
||||
.devinit = gf100_devinit_new,
|
||||
.fb = gk104_fb_new,
|
||||
.fb = gk110_fb_new,
|
||||
.fuse = gf100_fuse_new,
|
||||
.gpio = gk104_gpio_new,
|
||||
.i2c = gk104_i2c_new,
|
||||
|
@ -1938,7 +1939,7 @@ nv108_chipset = {
|
|||
.mxm = nv50_mxm_new,
|
||||
.pci = gk104_pci_new,
|
||||
.pmu = gk208_pmu_new,
|
||||
.therm = gf119_therm_new,
|
||||
.therm = gk104_therm_new,
|
||||
.timer = nv41_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.volt = gk104_volt_new,
|
||||
|
@ -2345,6 +2346,7 @@ nv138_chipset = {
|
|||
.mc = gp100_mc_new,
|
||||
.mmu = gp100_mmu_new,
|
||||
.therm = gp100_therm_new,
|
||||
.secboot = gp108_secboot_new,
|
||||
.pci = gp100_pci_new,
|
||||
.pmu = gp102_pmu_new,
|
||||
.timer = gk20a_timer_new,
|
||||
|
@ -2356,6 +2358,10 @@ nv138_chipset = {
|
|||
.disp = gp102_disp_new,
|
||||
.dma = gf119_dma_new,
|
||||
.fifo = gp100_fifo_new,
|
||||
.gr = gp107_gr_new,
|
||||
.nvdec = gp102_nvdec_new,
|
||||
.sec2 = gp102_sec2_new,
|
||||
.sw = gf100_sw_new,
|
||||
};
|
||||
|
||||
static const struct nvkm_device_chip
|
||||
|
@@ -2508,6 +2514,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
 		}
 	}

+	nvkm_therm_clkgate_fini(device->therm, suspend);

 	if (device->func->fini)
 		device->func->fini(device, suspend);
@@ -2597,6 +2604,7 @@ nvkm_device_init(struct nvkm_device *device)
 	}

 	nvkm_acpi_init(device);
+	nvkm_therm_clkgate_enable(device->therm);

 	time = ktime_to_us(ktime_get()) - time;
 	nvdev_trace(device, "init completed in %lldus\n", time);
|
@@ -137,6 +137,7 @@ struct gf100_gr_func {
 	int (*rops)(struct gf100_gr *);
 	int ppc_nr;
 	const struct gf100_grctx_func *grctx;
+	const struct nvkm_therm_clkgate_pack *clkgate_pack;
 	struct nvkm_sclass sclass[];
 };

@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "gf100.h"
+#include "gk104.h"
 #include "ctxgf100.h"

 #include <nvif/class.h>
@ -173,6 +174,208 @@ gk104_gr_pack_mmio[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_main_0[] = {
|
||||
{ 0x4041f0, 1, 0x00004046 },
|
||||
{ 0x409890, 1, 0x00000045 },
|
||||
{ 0x4098b0, 1, 0x0000007f },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_rstr2d_0[] = {
|
||||
{ 0x4078c0, 1, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_unk_0[] = {
|
||||
{ 0x406000, 1, 0x00004044 },
|
||||
{ 0x405860, 1, 0x00004042 },
|
||||
{ 0x40590c, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gcc_0[] = {
|
||||
{ 0x408040, 1, 0x00004044 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_sked_0[] = {
|
||||
{ 0x407000, 1, 0x00004044 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_unk_1[] = {
|
||||
{ 0x405bf0, 1, 0x00004044 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_ctxctl_0[] = {
|
||||
{ 0x41a890, 1, 0x00000042 },
|
||||
{ 0x41a8b0, 1, 0x0000007f },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_unk_0[] = {
|
||||
{ 0x418500, 1, 0x00004042 },
|
||||
{ 0x418608, 1, 0x00004042 },
|
||||
{ 0x418688, 1, 0x00004042 },
|
||||
{ 0x418718, 1, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_esetup_0[] = {
|
||||
{ 0x418828, 1, 0x00000044 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_tpbus_0[] = {
|
||||
{ 0x418bbc, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_zcull_0[] = {
|
||||
{ 0x418970, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_tpconf_0[] = {
|
||||
{ 0x418c70, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_unk_1[] = {
|
||||
{ 0x418cf0, 1, 0x00004042 },
|
||||
{ 0x418d70, 1, 0x00004042 },
|
||||
{ 0x418f0c, 1, 0x00004042 },
|
||||
{ 0x418e0c, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_gcc_0[] = {
|
||||
{ 0x419020, 1, 0x00004042 },
|
||||
{ 0x419038, 1, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_ffb_0[] = {
|
||||
{ 0x418898, 1, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_tex_0[] = {
|
||||
{ 0x419a40, 9, 0x00004042 },
|
||||
{ 0x419acc, 1, 0x00004047 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_poly_0[] = {
|
||||
{ 0x419868, 1, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_l1c_0[] = {
|
||||
{ 0x419ccc, 3, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_unk_2[] = {
|
||||
{ 0x419c70, 1, 0x00004045 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_mp_0[] = {
|
||||
{ 0x419fd0, 1, 0x00004043 },
|
||||
{ 0x419fd8, 1, 0x00004049 },
|
||||
{ 0x419fe0, 2, 0x00004042 },
|
||||
{ 0x419ff0, 1, 0x00004046 },
|
||||
{ 0x419ff8, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_gpc_ppc_0[] = {
|
||||
{ 0x41be28, 1, 0x00000042 },
|
||||
{ 0x41bfe8, 1, 0x00004042 },
|
||||
{ 0x41bed0, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_rop_zrop_0[] = {
|
||||
{ 0x408810, 2, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_rop_0[] = {
|
||||
{ 0x408a80, 6, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_rop_crop_0[] = {
|
||||
{ 0x4089a8, 1, 0x00004042 },
|
||||
{ 0x4089b0, 1, 0x00000042 },
|
||||
{ 0x4089b8, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_clkgate_blcg_init_pxbar_0[] = {
|
||||
{ 0x13c820, 1, 0x0001007f },
|
||||
{ 0x13cbe0, 1, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_pack
|
||||
gk104_clkgate_pack[] = {
|
||||
{ gk104_clkgate_blcg_init_main_0 },
|
||||
{ gk104_clkgate_blcg_init_rstr2d_0 },
|
||||
{ gk104_clkgate_blcg_init_unk_0 },
|
||||
{ gk104_clkgate_blcg_init_gcc_0 },
|
||||
{ gk104_clkgate_blcg_init_sked_0 },
|
||||
{ gk104_clkgate_blcg_init_unk_1 },
|
||||
{ gk104_clkgate_blcg_init_gpc_ctxctl_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_unk_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_esetup_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_tpbus_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_zcull_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_tpconf_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_unk_1 },
|
||||
{ gk104_clkgate_blcg_init_gpc_gcc_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_ffb_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_tex_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_poly_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_l1c_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_unk_2 },
|
||||
{ gk104_clkgate_blcg_init_gpc_mp_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_ppc_0 },
|
||||
{ gk104_clkgate_blcg_init_rop_zrop_0 },
|
||||
{ gk104_clkgate_blcg_init_rop_0 },
|
||||
{ gk104_clkgate_blcg_init_rop_crop_0 },
|
||||
{ gk104_clkgate_blcg_init_pxbar_0 },
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* PGRAPH engine/subdev functions
|
||||
******************************************************************************/
|
||||
|
@@ -214,6 +417,9 @@ gk104_gr_init(struct gf100_gr *gr)
 		gr->func->init_gpc_mmu(gr);

 	gf100_gr_mmio(gr, gr->func->mmio);
+	if (gr->func->clkgate_pack)
+		nvkm_therm_clkgate_init(gr->base.engine.subdev.device->therm,
+					gr->func->clkgate_pack);

 	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);

@@ -338,6 +544,7 @@ gk104_gr = {
 	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gk104_grctx,
+	.clkgate_pack = gk104_clkgate_pack,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
@ -0,0 +1,55 @@
|
|||
/*
|
||||
* Copyright 2018 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Lyude Paul <lyude@redhat.com>
|
||||
*/
|
||||
#ifndef __GK104_GR_H__
|
||||
#define __GK104_GR_H__
|
||||
|
||||
#include <subdev/therm.h>
|
||||
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_main_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rstr2d_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gcc_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_sked_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_1[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ctxctl_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_esetup_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpbus_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_zcull_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpconf_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_1[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_gcc_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ffb_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tex_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_poly_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_l1c_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_2[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_mp_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ppc_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_zrop_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_crop_0[];
|
||||
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_pxbar_0[];
|
||||
|
||||
#endif
|
|
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "gf100.h"
+#include "gk104.h"
 #include "ctxgf100.h"

 #include <subdev/timer.h>
@ -156,6 +157,159 @@ gk110_gr_pack_mmio[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_blcg_init_sked_0[] = {
|
||||
{ 0x407000, 1, 0x00004041 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_blcg_init_gpc_gcc_0[] = {
|
||||
{ 0x419020, 1, 0x00000042 },
|
||||
{ 0x419038, 1, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_blcg_init_gpc_l1c_0[] = {
|
||||
{ 0x419cd4, 2, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_blcg_init_gpc_mp_0[] = {
|
||||
{ 0x419fd0, 1, 0x00004043 },
|
||||
{ 0x419fd8, 1, 0x00004049 },
|
||||
{ 0x419fe0, 2, 0x00004042 },
|
||||
{ 0x419ff0, 1, 0x00000046 },
|
||||
{ 0x419ff8, 1, 0x00004042 },
|
||||
{ 0x419f90, 1, 0x00004042 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_main_0[] = {
|
||||
{ 0x4041f4, 1, 0x00000000 },
|
||||
{ 0x409894, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_unk_0[] = {
|
||||
{ 0x406004, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_sked_0[] = {
|
||||
{ 0x407004, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_gpc_ctxctl_0[] = {
|
||||
{ 0x41a894, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_gpc_unk_0[] = {
|
||||
{ 0x418504, 1, 0x00000000 },
|
||||
{ 0x41860c, 1, 0x00000000 },
|
||||
{ 0x41868c, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_gpc_esetup_0[] = {
|
||||
{ 0x41882c, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_gpc_zcull_0[] = {
|
||||
{ 0x418974, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_gpc_l1c_0[] = {
|
||||
{ 0x419cd8, 2, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_gpc_unk_1[] = {
|
||||
{ 0x419c74, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_gpc_mp_0[] = {
|
||||
{ 0x419fd4, 1, 0x00004a4a },
|
||||
{ 0x419fdc, 1, 0x00000014 },
|
||||
{ 0x419fe4, 1, 0x00000000 },
|
||||
{ 0x419ff4, 1, 0x00001724 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_gpc_ppc_0[] = {
|
||||
{ 0x41be2c, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_init
|
||||
gk110_clkgate_slcg_init_pcounter_0[] = {
|
||||
{ 0x1be018, 1, 0x000001ff },
|
||||
{ 0x1bc018, 1, 0x000001ff },
|
||||
{ 0x1b8018, 1, 0x000001ff },
|
||||
{ 0x1b4124, 1, 0x00000000 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_pack
|
||||
gk110_clkgate_pack[] = {
|
||||
{ gk104_clkgate_blcg_init_main_0 },
|
||||
{ gk104_clkgate_blcg_init_rstr2d_0 },
|
||||
{ gk104_clkgate_blcg_init_unk_0 },
|
||||
{ gk104_clkgate_blcg_init_gcc_0 },
|
||||
{ gk110_clkgate_blcg_init_sked_0 },
|
||||
{ gk104_clkgate_blcg_init_unk_1 },
|
||||
{ gk104_clkgate_blcg_init_gpc_ctxctl_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_unk_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_esetup_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_tpbus_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_zcull_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_tpconf_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_unk_1 },
|
||||
{ gk110_clkgate_blcg_init_gpc_gcc_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_ffb_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_tex_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_poly_0 },
|
||||
{ gk110_clkgate_blcg_init_gpc_l1c_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_unk_2 },
|
||||
{ gk110_clkgate_blcg_init_gpc_mp_0 },
|
||||
{ gk104_clkgate_blcg_init_gpc_ppc_0 },
|
||||
{ gk104_clkgate_blcg_init_rop_zrop_0 },
|
||||
{ gk104_clkgate_blcg_init_rop_0 },
|
||||
{ gk104_clkgate_blcg_init_rop_crop_0 },
|
||||
{ gk104_clkgate_blcg_init_pxbar_0 },
|
||||
{ gk110_clkgate_slcg_init_main_0 },
|
||||
{ gk110_clkgate_slcg_init_unk_0 },
|
||||
{ gk110_clkgate_slcg_init_sked_0 },
|
||||
{ gk110_clkgate_slcg_init_gpc_ctxctl_0 },
|
||||
{ gk110_clkgate_slcg_init_gpc_unk_0 },
|
||||
{ gk110_clkgate_slcg_init_gpc_esetup_0 },
|
||||
{ gk110_clkgate_slcg_init_gpc_zcull_0 },
|
||||
{ gk110_clkgate_slcg_init_gpc_l1c_0 },
|
||||
{ gk110_clkgate_slcg_init_gpc_unk_1 },
|
||||
{ gk110_clkgate_slcg_init_gpc_mp_0 },
|
||||
{ gk110_clkgate_slcg_init_gpc_ppc_0 },
|
||||
{ gk110_clkgate_slcg_init_pcounter_0 },
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* PGRAPH engine/subdev functions
|
||||
******************************************************************************/
|
||||
|
@@ -192,6 +346,7 @@ gk110_gr = {
 	.rops = gf100_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gk110_grctx,
+	.clkgate_pack = gk110_clkgate_pack,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
@@ -462,7 +462,7 @@ nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,

 		args->v0.id = di;
 		args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
-		strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
+		strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);

 		/* Currently only global counters (PCOUNTER) are implemented
 		 * but this will be different for local counters (MP). */

@@ -514,7 +514,7 @@ nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
 				 "/%s/%02x", dom->name, si);
 		} else {
 			strncpy(args->v0.name, sig->name,
-				sizeof(args->v0.name));
+				sizeof(args->v0.name) - 1);
 		}

 		args->v0.signal = si;

@@ -572,7 +572,7 @@ nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,

 		args->v0.source = sig->source[si];
 		args->v0.mask = src->mask;
-		strncpy(args->v0.name, src->name, sizeof(args->v0.name));
+		strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
 	}

 	if (++si < source_nr) {
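The three one-byte changes above all address the same strncpy() pitfall: when the source fills the whole destination, no NUL terminator is written. A minimal standalone demonstration of the safer sizeof(dst) - 1 form (with an explicit terminator added for clarity; the kernel code above can rely on the destination already being zeroed):

/* sketch: copying at most sizeof(dst) - 1 bytes keeps room for the terminator */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[8] = "";
	const char *src = "0123456789"; /* longer than dst */

	strncpy(dst, src, sizeof(dst) - 1);
	dst[sizeof(dst) - 1] = '\0';
	printf("%s\n", dst);		/* prints "0123456" */
	return 0;
}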
@@ -505,6 +505,7 @@ nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
 		ret = msgqueue_0137bca5_new(falcon, sb, queue);
 		break;
 	case 0x0148cdec:
+	case 0x015ccf3e:
 		ret = msgqueue_0148cdec_new(falcon, sb, queue);
 		break;
 	default:
@@ -110,6 +110,7 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
 	struct nvkm_device *device = clk->base.subdev.device;
 	u32 ctrl = nvkm_rd32(device, pll + 0);
 	u32 sclk = 0, P = 1, N = 1, M = 1;
+	u32 MP;

 	if (!(ctrl & 0x00000008)) {
 		if (ctrl & 0x00000001) {

@@ -130,10 +131,12 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
 		sclk = read_clk(clk, 0x10 + idx, false);
 	}

-	if (M * P)
-		return sclk * N / (M * P);
+	MP = M * P;

-	return 0;
+	if (!MP)
+		return 0;
+
+	return sclk * N / MP;
 }

 static int
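The read_pll() change above hoists the divisor into a variable and tests it explicitly, which both silences gcc-7's -Wint-in-bool-context warning for `if (M * P)` and keeps the divide-by-zero guard obvious. A standalone sketch with made-up values:

/* sketch: compute the divisor once, bail out explicitly when it is zero */
#include <stdio.h>

static unsigned int pll_khz(unsigned int sclk, unsigned int N,
			    unsigned int M, unsigned int P)
{
	unsigned int MP = M * P;

	if (!MP)
		return 0;	/* unconfigured PLL: report 0 rather than dividing by zero */

	return sclk * N / MP;
}

int main(void)
{
	printf("%u %u\n", pll_khz(27000, 74, 2, 1), pll_khz(27000, 74, 0, 0));
	return 0;
}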
@@ -22,6 +22,7 @@ nvkm-y += nvkm/subdev/fb/mcp89.o
 nvkm-y += nvkm/subdev/fb/gf100.o
 nvkm-y += nvkm/subdev/fb/gf108.o
 nvkm-y += nvkm/subdev/fb/gk104.o
+nvkm-y += nvkm/subdev/fb/gk110.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
 nvkm-y += nvkm/subdev/fb/gm200.o
@@ -26,6 +26,7 @@

 #include <core/memory.h>
 #include <core/option.h>
+#include <subdev/therm.h>

 void
 gf100_fb_intr(struct nvkm_fb *base)

@@ -92,6 +93,11 @@ gf100_fb_init(struct nvkm_fb *base)

 	if (fb->r100c10_page)
 		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+	if (base->func->clkgate_pack) {
+		nvkm_therm_clkgate_init(device->therm,
+					base->func->clkgate_pack);
+	}
 }

 void *
@ -20,10 +20,56 @@
|
|||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
* Lyude Paul
|
||||
*/
|
||||
#include "gk104.h"
|
||||
#include "gf100.h"
|
||||
#include "ram.h"
|
||||
|
||||
/*
|
||||
*******************************************************************************
|
||||
* PGRAPH registers for clockgating
|
||||
*******************************************************************************
|
||||
*/
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_fb_clkgate_blcg_init_unk_0[] = {
|
||||
{ 0x100d10, 1, 0x0000c244 },
|
||||
{ 0x100d30, 1, 0x0000c242 },
|
||||
{ 0x100d3c, 1, 0x00000242 },
|
||||
{ 0x100d48, 1, 0x00000242 },
|
||||
{ 0x100d1c, 1, 0x00000042 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_fb_clkgate_blcg_init_vm_0[] = {
|
||||
{ 0x100c98, 1, 0x00000242 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_fb_clkgate_blcg_init_main_0[] = {
|
||||
{ 0x10f000, 1, 0x00000042 },
|
||||
{ 0x17e030, 1, 0x00000044 },
|
||||
{ 0x17e040, 1, 0x00000044 },
|
||||
{}
|
||||
};
|
||||
|
||||
const struct nvkm_therm_clkgate_init
|
||||
gk104_fb_clkgate_blcg_init_bcast_0[] = {
|
||||
{ 0x17ea60, 4, 0x00000044 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_therm_clkgate_pack
|
||||
gk104_fb_clkgate_pack[] = {
|
||||
{ gk104_fb_clkgate_blcg_init_unk_0 },
|
||||
{ gk104_fb_clkgate_blcg_init_vm_0 },
|
||||
{ gk104_fb_clkgate_blcg_init_main_0 },
|
||||
{ gk104_fb_clkgate_blcg_init_bcast_0 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct nvkm_fb_func
|
||||
gk104_fb = {
|
||||
.dtor = gf100_fb_dtor,
|
||||
|
@ -33,6 +79,7 @@ gk104_fb = {
|
|||
.intr = gf100_fb_intr,
|
||||
.ram_new = gk104_ram_new,
|
||||
.default_bigpage = 17,
|
||||
.clkgate_pack = gk104_fb_clkgate_pack,
|
||||
};
|
||||
|
||||
int
|
||||
|
|
|
@@ -0,0 +1,35 @@
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Lyude Paul
 */

#ifndef __GK104_FB_H__
#define __GK104_FB_H__

#include <subdev/therm.h>

extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_unk_0[];
extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_vm_0[];
extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_main_0[];
extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_bcast_0[];

#endif
@@ -0,0 +1,71 @@
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Lyude Paul
 */
#include "gf100.h"
#include "gk104.h"
#include "ram.h"
#include <subdev/therm.h>
#include <subdev/fb.h>

/*
 *******************************************************************************
 * PGRAPH registers for clockgating
 *******************************************************************************
 */

static const struct nvkm_therm_clkgate_init
gk110_fb_clkgate_blcg_init_unk_0[] = {
	{ 0x100d10, 1, 0x0000c242 },
	{ 0x100d30, 1, 0x0000c242 },
	{ 0x100d3c, 1, 0x00000242 },
	{ 0x100d48, 1, 0x0000c242 },
	{ 0x100d1c, 1, 0x00000042 },
	{}
};

static const struct nvkm_therm_clkgate_pack
gk110_fb_clkgate_pack[] = {
	{ gk110_fb_clkgate_blcg_init_unk_0 },
	{ gk104_fb_clkgate_blcg_init_vm_0 },
	{ gk104_fb_clkgate_blcg_init_main_0 },
	{ gk104_fb_clkgate_blcg_init_bcast_0 },
	{}
};

static const struct nvkm_fb_func
gk110_fb = {
	.dtor = gf100_fb_dtor,
	.oneinit = gf100_fb_oneinit,
	.init = gf100_fb_init,
	.init_page = gf100_fb_init_page,
	.intr = gf100_fb_intr,
	.ram_new = gk104_ram_new,
	.default_bigpage = 17,
	.clkgate_pack = gk110_fb_clkgate_pack,
};

int
gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
	return gf100_fb_new_(&gk110_fb, device, index, pfb);
}
@@ -3,6 +3,7 @@
 #define __NVKM_FB_PRIV_H__
 #define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
 #include <subdev/fb.h>
+#include <subdev/therm.h>
 struct nvkm_bios;
 
 struct nvkm_fb_func {
@@ -27,6 +28,7 @@ struct nvkm_fb_func {
 	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 
 	u8 default_bigpage;
+	const struct nvkm_therm_clkgate_pack *clkgate_pack;
 };
 
 void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
@@ -106,7 +106,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 	} else
 		return ret;
 
-	if (IS_ERR((memory = nvkm_umem_search(client, handle)))) {
+	memory = nvkm_umem_search(client, handle);
+	if (IS_ERR(memory)) {
 		VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
 		return PTR_ERR(memory);
 	}
@@ -642,7 +642,7 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
 		else
 			block = (size >> page[i].shift) << page[i].shift;
 	} else {
-		block = (size >> page[i].shift) << page[i].shift;;
+		block = (size >> page[i].shift) << page[i].shift;
 	}
 
 	/* Perform operation. */
@ -47,8 +47,8 @@ static uint32_t gf100_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x584d454d,
|
||||
0x00000756,
|
||||
0x00000748,
|
||||
0x00000754,
|
||||
0x00000746,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -69,8 +69,8 @@ static uint32_t gf100_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x46524550,
|
||||
0x0000075a,
|
||||
0x00000758,
|
||||
0x00000756,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -91,8 +91,8 @@ static uint32_t gf100_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x5f433249,
|
||||
0x00000b8a,
|
||||
0x00000a2d,
|
||||
0x00000b88,
|
||||
0x00000a2b,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -113,8 +113,8 @@ static uint32_t gf100_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x54534554,
|
||||
0x00000bb3,
|
||||
0x00000b8c,
|
||||
0x00000bb1,
|
||||
0x00000b8a,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -135,8 +135,8 @@ static uint32_t gf100_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x454c4449,
|
||||
0x00000bbf,
|
||||
0x00000bbd,
|
||||
0x00000bbb,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -237,19 +237,19 @@ static uint32_t gf100_pmu_data[] = {
|
|||
0x000005d3,
|
||||
0x00000003,
|
||||
0x00000002,
|
||||
0x0000069d,
|
||||
0x0000069b,
|
||||
0x00040004,
|
||||
0x00000000,
|
||||
0x000006b9,
|
||||
0x000006b7,
|
||||
0x00010005,
|
||||
0x00000000,
|
||||
0x000006d6,
|
||||
0x000006d4,
|
||||
0x00010006,
|
||||
0x00000000,
|
||||
0x0000065b,
|
||||
0x00000007,
|
||||
0x00000000,
|
||||
0x000006e1,
|
||||
0x000006df,
|
||||
/* 0x03c4: memx_func_tail */
|
||||
/* 0x03c4: memx_ts_start */
|
||||
0x00000000,
|
||||
|
@ -1373,432 +1373,432 @@ static uint32_t gf100_pmu_code[] = {
|
|||
/* 0x065b: memx_func_wait_vblank */
|
||||
0x9800f840,
|
||||
0x66b00016,
|
||||
0x130bf400,
|
||||
0x120bf400,
|
||||
0xf40166b0,
|
||||
0x0ef4060b,
|
||||
/* 0x066d: memx_func_wait_vblank_head1 */
|
||||
0x2077f12e,
|
||||
0x070ef400,
|
||||
/* 0x0674: memx_func_wait_vblank_head0 */
|
||||
0x000877f1,
|
||||
/* 0x0678: memx_func_wait_vblank_0 */
|
||||
0x07c467f1,
|
||||
0xcf0664b6,
|
||||
0x67fd0066,
|
||||
0xf31bf404,
|
||||
/* 0x0688: memx_func_wait_vblank_1 */
|
||||
0x07c467f1,
|
||||
0xcf0664b6,
|
||||
0x67fd0066,
|
||||
0xf30bf404,
|
||||
/* 0x0698: memx_func_wait_vblank_fini */
|
||||
0xf80410b6,
|
||||
/* 0x069d: memx_func_wr32 */
|
||||
0x00169800,
|
||||
0xb6011598,
|
||||
0x60f90810,
|
||||
0xd0fc50f9,
|
||||
0x21f4e0fc,
|
||||
0x0242b640,
|
||||
0xf8e91bf4,
|
||||
/* 0x06b9: memx_func_wait */
|
||||
0x2c87f000,
|
||||
0xcf0684b6,
|
||||
0x1e980088,
|
||||
0x011d9800,
|
||||
0x98021c98,
|
||||
0x10b6031b,
|
||||
0xa321f410,
|
||||
/* 0x06d6: memx_func_delay */
|
||||
0x1e9800f8,
|
||||
0x0410b600,
|
||||
0xf87e21f4,
|
||||
/* 0x06e1: memx_func_train */
|
||||
/* 0x06e3: memx_exec */
|
||||
0xf900f800,
|
||||
0xb9d0f9e0,
|
||||
0xb2b902c1,
|
||||
/* 0x06ed: memx_exec_next */
|
||||
0x00139802,
|
||||
0xe70410b6,
|
||||
0xe701f034,
|
||||
0xb601e033,
|
||||
0x30f00132,
|
||||
0xde35980c,
|
||||
0x12b855f9,
|
||||
0xe41ef406,
|
||||
0x98f10b98,
|
||||
0xcbbbf20c,
|
||||
0xc4b7f102,
|
||||
0x06b4b607,
|
||||
0xfc00bbcf,
|
||||
0xf5e0fcd0,
|
||||
0xf8033621,
|
||||
/* 0x0729: memx_info */
|
||||
0x01c67000,
|
||||
/* 0x072f: memx_info_data */
|
||||
0xf10e0bf4,
|
||||
0xf103ccc7,
|
||||
0xf40800b7,
|
||||
/* 0x073a: memx_info_train */
|
||||
0xc7f10b0e,
|
||||
0xb7f10bcc,
|
||||
/* 0x0742: memx_info_send */
|
||||
0x21f50100,
|
||||
0x00f80336,
|
||||
/* 0x0748: memx_recv */
|
||||
0xf401d6b0,
|
||||
0xd6b0980b,
|
||||
0xd80bf400,
|
||||
/* 0x0756: memx_init */
|
||||
0x2077f02c,
|
||||
/* 0x0673: memx_func_wait_vblank_head0 */
|
||||
0xf0060ef4,
|
||||
/* 0x0676: memx_func_wait_vblank_0 */
|
||||
0x67f10877,
|
||||
0x64b607c4,
|
||||
0x0066cf06,
|
||||
0xf40467fd,
|
||||
/* 0x0686: memx_func_wait_vblank_1 */
|
||||
0x67f1f31b,
|
||||
0x64b607c4,
|
||||
0x0066cf06,
|
||||
0xf40467fd,
|
||||
/* 0x0696: memx_func_wait_vblank_fini */
|
||||
0x10b6f30b,
|
||||
/* 0x069b: memx_func_wr32 */
|
||||
0x9800f804,
|
||||
0x15980016,
|
||||
0x0810b601,
|
||||
0x50f960f9,
|
||||
0xe0fcd0fc,
|
||||
0xb64021f4,
|
||||
0x1bf40242,
|
||||
/* 0x06b7: memx_func_wait */
|
||||
0xf000f8e9,
|
||||
0x84b62c87,
|
||||
0x0088cf06,
|
||||
0x98001e98,
|
||||
0x1c98011d,
|
||||
0x031b9802,
|
||||
0xf41010b6,
|
||||
0x00f8a321,
|
||||
/* 0x06d4: memx_func_delay */
|
||||
0xb6001e98,
|
||||
0x21f40410,
|
||||
/* 0x06df: memx_func_train */
|
||||
0xf800f87e,
|
||||
/* 0x06e1: memx_exec */
|
||||
0xf9e0f900,
|
||||
0x02c1b9d0,
|
||||
/* 0x06eb: memx_exec_next */
|
||||
0x9802b2b9,
|
||||
0x10b60013,
|
||||
0xf034e704,
|
||||
0xe033e701,
|
||||
0x0132b601,
|
||||
0x980c30f0,
|
||||
0x55f9de35,
|
||||
0xf40612b8,
|
||||
0x0b98e41e,
|
||||
0xf20c98f1,
|
||||
0xf102cbbb,
|
||||
0xb607c4b7,
|
||||
0xbbcf06b4,
|
||||
0xfcd0fc00,
|
||||
0x3621f5e0,
|
||||
/* 0x0727: memx_info */
|
||||
0x7000f803,
|
||||
0x0bf401c6,
|
||||
/* 0x072d: memx_info_data */
|
||||
0xccc7f10e,
|
||||
0x00b7f103,
|
||||
0x0b0ef408,
|
||||
/* 0x0738: memx_info_train */
|
||||
0x0bccc7f1,
|
||||
0x0100b7f1,
|
||||
/* 0x0740: memx_info_send */
|
||||
0x033621f5,
|
||||
/* 0x0746: memx_recv */
|
||||
0xd6b000f8,
|
||||
0x980bf401,
|
||||
0xf400d6b0,
|
||||
0x00f8d80b,
|
||||
/* 0x0754: memx_init */
|
||||
/* 0x0756: perf_recv */
|
||||
0x00f800f8,
|
||||
/* 0x0758: perf_recv */
|
||||
/* 0x075a: perf_init */
|
||||
0x00f800f8,
|
||||
/* 0x075c: i2c_drive_scl */
|
||||
0xf40036b0,
|
||||
0x07f1110b,
|
||||
0x04b607e0,
|
||||
0x0001d006,
|
||||
0x00f804bd,
|
||||
/* 0x0770: i2c_drive_scl_lo */
|
||||
0x07e407f1,
|
||||
0xd00604b6,
|
||||
0x04bd0001,
|
||||
/* 0x077e: i2c_drive_sda */
|
||||
/* 0x0758: perf_init */
|
||||
/* 0x075a: i2c_drive_scl */
|
||||
0x36b000f8,
|
||||
0x110bf400,
|
||||
0x07e007f1,
|
||||
0xd00604b6,
|
||||
0x04bd0002,
|
||||
/* 0x0792: i2c_drive_sda_lo */
|
||||
0x04bd0001,
|
||||
/* 0x076e: i2c_drive_scl_lo */
|
||||
0x07f100f8,
|
||||
0x04b607e4,
|
||||
0x0001d006,
|
||||
0x00f804bd,
|
||||
/* 0x077c: i2c_drive_sda */
|
||||
0xf40036b0,
|
||||
0x07f1110b,
|
||||
0x04b607e0,
|
||||
0x0002d006,
|
||||
0x00f804bd,
|
||||
/* 0x07a0: i2c_sense_scl */
|
||||
0xf10132f4,
|
||||
0xb607c437,
|
||||
0x33cf0634,
|
||||
0x0431fd00,
|
||||
0xf4060bf4,
|
||||
/* 0x07b6: i2c_sense_scl_done */
|
||||
0x00f80131,
|
||||
/* 0x07b8: i2c_sense_sda */
|
||||
0xf10132f4,
|
||||
0xb607c437,
|
||||
0x33cf0634,
|
||||
0x0432fd00,
|
||||
0xf4060bf4,
|
||||
/* 0x07ce: i2c_sense_sda_done */
|
||||
0x00f80131,
|
||||
/* 0x07d0: i2c_raise_scl */
|
||||
0x47f140f9,
|
||||
0x37f00898,
|
||||
0x5c21f501,
|
||||
/* 0x07dd: i2c_raise_scl_wait */
|
||||
/* 0x0790: i2c_drive_sda_lo */
|
||||
0x07e407f1,
|
||||
0xd00604b6,
|
||||
0x04bd0002,
|
||||
/* 0x079e: i2c_sense_scl */
|
||||
0x32f400f8,
|
||||
0xc437f101,
|
||||
0x0634b607,
|
||||
0xfd0033cf,
|
||||
0x0bf40431,
|
||||
0x0131f406,
|
||||
/* 0x07b4: i2c_sense_scl_done */
|
||||
/* 0x07b6: i2c_sense_sda */
|
||||
0x32f400f8,
|
||||
0xc437f101,
|
||||
0x0634b607,
|
||||
0xfd0033cf,
|
||||
0x0bf40432,
|
||||
0x0131f406,
|
||||
/* 0x07cc: i2c_sense_sda_done */
|
||||
/* 0x07ce: i2c_raise_scl */
|
||||
0x40f900f8,
|
||||
0x089847f1,
|
||||
0xf50137f0,
|
||||
/* 0x07db: i2c_raise_scl_wait */
|
||||
0xf1075a21,
|
||||
0xf403e8e7,
|
||||
0x21f57e21,
|
||||
0x01f4079e,
|
||||
0x0142b609,
|
||||
/* 0x07ef: i2c_raise_scl_done */
|
||||
0xfcef1bf4,
|
||||
/* 0x07f3: i2c_start */
|
||||
0xf500f840,
|
||||
0xf4079e21,
|
||||
0x21f50d11,
|
||||
0x11f407b6,
|
||||
0x300ef406,
|
||||
/* 0x0804: i2c_start_rep */
|
||||
0xf50037f0,
|
||||
0xf0075a21,
|
||||
0x21f50137,
|
||||
0x76bb077c,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0xf550fc04,
|
||||
0xb607ce21,
|
||||
0x11f40464,
|
||||
/* 0x0831: i2c_start_send */
|
||||
0x0037f01f,
|
||||
0x077c21f5,
|
||||
0x1388e7f1,
|
||||
0xf07e21f4,
|
||||
0x21f50037,
|
||||
0xe7f1075a,
|
||||
0x21f41388,
|
||||
/* 0x084d: i2c_start_out */
|
||||
/* 0x084f: i2c_stop */
|
||||
0xf000f87e,
|
||||
0x21f50037,
|
||||
0x37f0075a,
|
||||
0x7c21f500,
|
||||
0xe8e7f107,
|
||||
0x7e21f403,
|
||||
0x07a021f5,
|
||||
0xb60901f4,
|
||||
0x1bf40142,
|
||||
/* 0x07f1: i2c_raise_scl_done */
|
||||
0xf840fcef,
|
||||
/* 0x07f5: i2c_start */
|
||||
0xa021f500,
|
||||
0x0d11f407,
|
||||
0x07b821f5,
|
||||
0xf40611f4,
|
||||
/* 0x0806: i2c_start_rep */
|
||||
0x37f0300e,
|
||||
0x5c21f500,
|
||||
0x0137f007,
|
||||
0x077e21f5,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0xd021f550,
|
||||
0x0464b607,
|
||||
/* 0x0833: i2c_start_send */
|
||||
0xf01f11f4,
|
||||
0x21f50037,
|
||||
0xe7f1077e,
|
||||
0x21f41388,
|
||||
0x0037f07e,
|
||||
0x075c21f5,
|
||||
0x1388e7f1,
|
||||
/* 0x084f: i2c_start_out */
|
||||
0xf87e21f4,
|
||||
/* 0x0851: i2c_stop */
|
||||
0x0037f000,
|
||||
0x075c21f5,
|
||||
0xf50037f0,
|
||||
0xf1077e21,
|
||||
0xf403e8e7,
|
||||
0x37f07e21,
|
||||
0x5c21f501,
|
||||
0x88e7f107,
|
||||
0x7e21f413,
|
||||
0xf50137f0,
|
||||
0xf1077e21,
|
||||
0xf1075a21,
|
||||
0xf41388e7,
|
||||
0x00f87e21,
|
||||
/* 0x0884: i2c_bitw */
|
||||
0x077e21f5,
|
||||
0x03e8e7f1,
|
||||
0xbb7e21f4,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x07d021f5,
|
||||
0xf40464b6,
|
||||
0xe7f11811,
|
||||
0x21f41388,
|
||||
0x0037f07e,
|
||||
0x075c21f5,
|
||||
0x1388e7f1,
|
||||
/* 0x08c3: i2c_bitw_out */
|
||||
0xf87e21f4,
|
||||
/* 0x08c5: i2c_bitr */
|
||||
0x0137f000,
|
||||
0x077e21f5,
|
||||
0x03e8e7f1,
|
||||
0xbb7e21f4,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x07d021f5,
|
||||
0xf40464b6,
|
||||
0x21f51b11,
|
||||
0x37f007b8,
|
||||
0x5c21f500,
|
||||
0x37f07e21,
|
||||
0x7c21f501,
|
||||
0x88e7f107,
|
||||
0x7e21f413,
|
||||
0xf4013cf0,
|
||||
/* 0x090a: i2c_bitr_done */
|
||||
0x00f80131,
|
||||
/* 0x090c: i2c_get_byte */
|
||||
0xf00057f0,
|
||||
/* 0x0912: i2c_get_byte_next */
|
||||
0x54b60847,
|
||||
0x0076bb01,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x21f550fc,
|
||||
0x64b608c5,
|
||||
0x2b11f404,
|
||||
0xb60553fd,
|
||||
0x1bf40142,
|
||||
0x0137f0d8,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x8421f550,
|
||||
0x0464b608,
|
||||
/* 0x095c: i2c_get_byte_done */
|
||||
/* 0x095e: i2c_put_byte */
|
||||
0x47f000f8,
|
||||
/* 0x0961: i2c_put_byte_next */
|
||||
0x0142b608,
|
||||
0xbb3854ff,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x088421f5,
|
||||
0xf40464b6,
|
||||
0x46b03411,
|
||||
0xd81bf400,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0xc521f550,
|
||||
0x0464b608,
|
||||
0xbb0f11f4,
|
||||
0x36b00076,
|
||||
0x061bf401,
|
||||
/* 0x09b7: i2c_put_byte_done */
|
||||
0xf80132f4,
|
||||
/* 0x09b9: i2c_addr */
|
||||
0x0076bb00,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x21f550fc,
|
||||
0x64b607f5,
|
||||
0x2911f404,
|
||||
0x012ec3e7,
|
||||
0xfd0134b6,
|
||||
0x76bb0553,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0xf550fc04,
|
||||
0xb6095e21,
|
||||
/* 0x09fe: i2c_addr_done */
|
||||
0x00f80464,
|
||||
/* 0x0a00: i2c_acquire_addr */
|
||||
0xb6f8cec7,
|
||||
0xe0b702e4,
|
||||
0xee980d1c,
|
||||
/* 0x0a0f: i2c_acquire */
|
||||
0xf500f800,
|
||||
0xf40a0021,
|
||||
0xd9f00421,
|
||||
0x4021f403,
|
||||
/* 0x0a1e: i2c_release */
|
||||
/* 0x0882: i2c_bitw */
|
||||
0x21f500f8,
|
||||
0x21f40a00,
|
||||
0x03daf004,
|
||||
0xf84021f4,
|
||||
/* 0x0a2d: i2c_recv */
|
||||
0x0132f400,
|
||||
0xb6f8c1c7,
|
||||
0x16b00214,
|
||||
0x3a1ff528,
|
||||
0xf413a001,
|
||||
0x0032980c,
|
||||
0x0ccc13a0,
|
||||
0xf4003198,
|
||||
0xd0f90231,
|
||||
0xd0f9e0f9,
|
||||
0x000067f1,
|
||||
0x100063f1,
|
||||
0xbb016792,
|
||||
0xe7f1077c,
|
||||
0x21f403e8,
|
||||
0x0076bb7e,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x21f550fc,
|
||||
0x64b607ce,
|
||||
0x1811f404,
|
||||
0x1388e7f1,
|
||||
0xf07e21f4,
|
||||
0x21f50037,
|
||||
0xe7f1075a,
|
||||
0x21f41388,
|
||||
/* 0x08c1: i2c_bitw_out */
|
||||
/* 0x08c3: i2c_bitr */
|
||||
0xf000f87e,
|
||||
0x21f50137,
|
||||
0xe7f1077c,
|
||||
0x21f403e8,
|
||||
0x0076bb7e,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x21f550fc,
|
||||
0x64b607ce,
|
||||
0x1b11f404,
|
||||
0x07b621f5,
|
||||
0xf50037f0,
|
||||
0xf1075a21,
|
||||
0xf41388e7,
|
||||
0x3cf07e21,
|
||||
0x0131f401,
|
||||
/* 0x0908: i2c_bitr_done */
|
||||
/* 0x090a: i2c_get_byte */
|
||||
0x57f000f8,
|
||||
0x0847f000,
|
||||
/* 0x0910: i2c_get_byte_next */
|
||||
0xbb0154b6,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x0a0f21f5,
|
||||
0xfc0464b6,
|
||||
0x00d6b0d0,
|
||||
0x00b31bf5,
|
||||
0xbb0057f0,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x09b921f5,
|
||||
0xf50464b6,
|
||||
0xc700d011,
|
||||
0x76bbe0c5,
|
||||
0x08c321f5,
|
||||
0xf40464b6,
|
||||
0x53fd2b11,
|
||||
0x0142b605,
|
||||
0xf0d81bf4,
|
||||
0x76bb0137,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0xf550fc04,
|
||||
0xb6095e21,
|
||||
0x11f50464,
|
||||
0x57f000ad,
|
||||
0xb6088221,
|
||||
/* 0x095a: i2c_get_byte_done */
|
||||
0x00f80464,
|
||||
/* 0x095c: i2c_put_byte */
|
||||
/* 0x095f: i2c_put_byte_next */
|
||||
0xb60847f0,
|
||||
0x54ff0142,
|
||||
0x0076bb38,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x21f550fc,
|
||||
0x64b60882,
|
||||
0x3411f404,
|
||||
0xf40046b0,
|
||||
0x76bbd81b,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0xf550fc04,
|
||||
0xb608c321,
|
||||
0x11f40464,
|
||||
0x0076bb0f,
|
||||
0xf40136b0,
|
||||
0x32f4061b,
|
||||
/* 0x09b5: i2c_put_byte_done */
|
||||
/* 0x09b7: i2c_addr */
|
||||
0xbb00f801,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x07f321f5,
|
||||
0xf40464b6,
|
||||
0xc3e72911,
|
||||
0x34b6012e,
|
||||
0x0553fd01,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x5c21f550,
|
||||
0x0464b609,
|
||||
/* 0x09fc: i2c_addr_done */
|
||||
/* 0x09fe: i2c_acquire_addr */
|
||||
0xcec700f8,
|
||||
0x02e4b6f8,
|
||||
0x0d1ce0b7,
|
||||
0xf800ee98,
|
||||
/* 0x0a0d: i2c_acquire */
|
||||
0xfe21f500,
|
||||
0x0421f409,
|
||||
0xf403d9f0,
|
||||
0x00f84021,
|
||||
/* 0x0a1c: i2c_release */
|
||||
0x09fe21f5,
|
||||
0xf00421f4,
|
||||
0x21f403da,
|
||||
/* 0x0a2b: i2c_recv */
|
||||
0xf400f840,
|
||||
0xc1c70132,
|
||||
0x0214b6f8,
|
||||
0xf52816b0,
|
||||
0xa0013a1f,
|
||||
0x980cf413,
|
||||
0x13a00032,
|
||||
0x31980ccc,
|
||||
0x0231f400,
|
||||
0xe0f9d0f9,
|
||||
0x67f1d0f9,
|
||||
0x63f10000,
|
||||
0x67921000,
|
||||
0x0076bb01,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x21f550fc,
|
||||
0x64b609b9,
|
||||
0x8a11f504,
|
||||
0x64b60a0d,
|
||||
0xb0d0fc04,
|
||||
0x1bf500d6,
|
||||
0x57f000b3,
|
||||
0x0076bb00,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x21f550fc,
|
||||
0x64b6090c,
|
||||
0x6a11f404,
|
||||
0xbbe05bcb,
|
||||
0x64b609b7,
|
||||
0xd011f504,
|
||||
0xe0c5c700,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x5c21f550,
|
||||
0x0464b609,
|
||||
0x00ad11f5,
|
||||
0xbb0157f0,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x085121f5,
|
||||
0xb90464b6,
|
||||
0x74bd025b,
|
||||
/* 0x0b33: i2c_recv_not_rd08 */
|
||||
0xb0430ef4,
|
||||
0x1bf401d6,
|
||||
0x0057f03d,
|
||||
0x09b921f5,
|
||||
0xc73311f4,
|
||||
0x21f5e0c5,
|
||||
0x11f4095e,
|
||||
0x0057f029,
|
||||
0x09b921f5,
|
||||
0xc71f11f4,
|
||||
0x21f5e0b5,
|
||||
0x11f4095e,
|
||||
0x5121f515,
|
||||
0xc774bd08,
|
||||
0x1bf408c5,
|
||||
0x0232f409,
|
||||
/* 0x0b73: i2c_recv_not_wr08 */
|
||||
/* 0x0b73: i2c_recv_done */
|
||||
0xc7030ef4,
|
||||
0x21f5f8ce,
|
||||
0xe0fc0a1e,
|
||||
0x12f4d0fc,
|
||||
0x027cb90a,
|
||||
0x033621f5,
|
||||
/* 0x0b88: i2c_recv_exit */
|
||||
/* 0x0b8a: i2c_init */
|
||||
0x00f800f8,
|
||||
/* 0x0b8c: test_recv */
|
||||
0x05d817f1,
|
||||
0xcf0614b6,
|
||||
0x10b60011,
|
||||
0xd807f101,
|
||||
0x0604b605,
|
||||
0xbd0001d0,
|
||||
0x00e7f104,
|
||||
0x4fe3f1d9,
|
||||
0x5621f513,
|
||||
/* 0x0bb3: test_init */
|
||||
0xf100f802,
|
||||
0xf50800e7,
|
||||
0xf8025621,
|
||||
/* 0x0bbd: idle_recv */
|
||||
/* 0x0bbf: idle */
|
||||
0xf400f800,
|
||||
0x17f10031,
|
||||
0x14b605d4,
|
||||
0x09b721f5,
|
||||
0xf50464b6,
|
||||
0xbb008a11,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x090a21f5,
|
||||
0xf40464b6,
|
||||
0x5bcb6a11,
|
||||
0x0076bbe0,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x21f550fc,
|
||||
0x64b6084f,
|
||||
0x025bb904,
|
||||
0x0ef474bd,
|
||||
/* 0x0b31: i2c_recv_not_rd08 */
|
||||
0x01d6b043,
|
||||
0xf03d1bf4,
|
||||
0x21f50057,
|
||||
0x11f409b7,
|
||||
0xe0c5c733,
|
||||
0x095c21f5,
|
||||
0xf02911f4,
|
||||
0x21f50057,
|
||||
0x11f409b7,
|
||||
0xe0b5c71f,
|
||||
0x095c21f5,
|
||||
0xf51511f4,
|
||||
0xbd084f21,
|
||||
0x08c5c774,
|
||||
0xf4091bf4,
|
||||
0x0ef40232,
|
||||
/* 0x0b71: i2c_recv_not_wr08 */
|
||||
/* 0x0b71: i2c_recv_done */
|
||||
0xf8cec703,
|
||||
0x0a1c21f5,
|
||||
0xd0fce0fc,
|
||||
0xb90a12f4,
|
||||
0x21f5027c,
|
||||
/* 0x0b86: i2c_recv_exit */
|
||||
0x00f80336,
|
||||
/* 0x0b88: i2c_init */
|
||||
/* 0x0b8a: test_recv */
|
||||
0x17f100f8,
|
||||
0x14b605d8,
|
||||
0x0011cf06,
|
||||
0xf10110b6,
|
||||
0xb605d407,
|
||||
0xb605d807,
|
||||
0x01d00604,
|
||||
/* 0x0bdb: idle_loop */
|
||||
0xf004bd00,
|
||||
0x32f45817,
|
||||
/* 0x0be1: idle_proc */
|
||||
/* 0x0be1: idle_proc_exec */
|
||||
0xb910f902,
|
||||
0x21f5021e,
|
||||
0x10fc033f,
|
||||
0xf40911f4,
|
||||
0x0ef40231,
|
||||
/* 0x0bf5: idle_proc_next */
|
||||
0x5810b6ef,
|
||||
0xf4061fb8,
|
||||
0x02f4e61b,
|
||||
0x0028f4dd,
|
||||
0x00bb0ef4,
|
||||
0xf104bd00,
|
||||
0xf1d900e7,
|
||||
0xf5134fe3,
|
||||
0xf8025621,
|
||||
/* 0x0bb1: test_init */
|
||||
0x00e7f100,
|
||||
0x5621f508,
|
||||
/* 0x0bbb: idle_recv */
|
||||
0xf800f802,
|
||||
/* 0x0bbd: idle */
|
||||
0x0031f400,
|
||||
0x05d417f1,
|
||||
0xcf0614b6,
|
||||
0x10b60011,
|
||||
0xd407f101,
|
||||
0x0604b605,
|
||||
0xbd0001d0,
|
||||
/* 0x0bd9: idle_loop */
|
||||
0x5817f004,
|
||||
/* 0x0bdf: idle_proc */
|
||||
/* 0x0bdf: idle_proc_exec */
|
||||
0xf90232f4,
|
||||
0x021eb910,
|
||||
0x033f21f5,
|
||||
0x11f410fc,
|
||||
0x0231f409,
|
||||
/* 0x0bf3: idle_proc_next */
|
||||
0xb6ef0ef4,
|
||||
0x1fb85810,
|
||||
0xe61bf406,
|
||||
0xf4dd02f4,
|
||||
0x0ef40028,
|
||||
0x000000bb,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
|
|
@ -47,8 +47,8 @@ static uint32_t gk208_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x584d454d,
|
||||
0x000005f3,
|
||||
0x000005e5,
|
||||
0x000005ee,
|
||||
0x000005e0,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -69,8 +69,8 @@ static uint32_t gk208_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x46524550,
|
||||
0x000005f7,
|
||||
0x000005f5,
|
||||
0x000005f2,
|
||||
0x000005f0,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -91,8 +91,8 @@ static uint32_t gk208_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x5f433249,
|
||||
0x000009f8,
|
||||
0x000008a2,
|
||||
0x000009f3,
|
||||
0x0000089d,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -113,8 +113,8 @@ static uint32_t gk208_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x54534554,
|
||||
0x00000a16,
|
||||
0x000009fa,
|
||||
0x00000a11,
|
||||
0x000009f5,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -135,8 +135,8 @@ static uint32_t gk208_pmu_data[] = {
|
|||
0x00000000,
|
||||
0x00000000,
|
||||
0x454c4449,
|
||||
0x00000a21,
|
||||
0x00000a1f,
|
||||
0x00000a1c,
|
||||
0x00000a1a,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
@ -234,22 +234,22 @@ static uint32_t gk208_pmu_data[] = {
|
|||
/* 0x037c: memx_func_next */
|
||||
0x00000002,
|
||||
0x00000000,
|
||||
0x000004cf,
|
||||
0x000004cc,
|
||||
0x00000003,
|
||||
0x00000002,
|
||||
0x00000546,
|
||||
0x00000541,
|
||||
0x00040004,
|
||||
0x00000000,
|
||||
0x00000563,
|
||||
0x0000055e,
|
||||
0x00010005,
|
||||
0x00000000,
|
||||
0x0000057d,
|
||||
0x00000578,
|
||||
0x00010006,
|
||||
0x00000000,
|
||||
0x00000541,
|
||||
0x0000053c,
|
||||
0x00000007,
|
||||
0x00000000,
|
||||
0x00000589,
|
||||
0x00000584,
|
||||
/* 0x03c4: memx_func_tail */
|
||||
/* 0x03c4: memx_ts_start */
|
||||
0x00000000,
|
||||
|
@ -1239,454 +1239,454 @@ static uint32_t gk208_pmu_code[] = {
|
|||
0x0001f604,
|
||||
0x00f804bd,
|
||||
/* 0x045c: memx_func_enter */
|
||||
0x162067f1,
|
||||
0xf55d77f1,
|
||||
0x047e6eb2,
|
||||
0xd8b20000,
|
||||
0xf90487fd,
|
||||
0xfc80f960,
|
||||
0x7ee0fcd0,
|
||||
0x0700002d,
|
||||
0x7e6eb2fe,
|
||||
0x47162046,
|
||||
0x6eb2f55d,
|
||||
0x0000047e,
|
||||
0x87fdd8b2,
|
||||
0xf960f904,
|
||||
0xfcd0fc80,
|
||||
0x002d7ee0,
|
||||
0xb2fe0700,
|
||||
0x00047e6e,
|
||||
0xfdd8b200,
|
||||
0x60f90487,
|
||||
0xd0fc80f9,
|
||||
0x2d7ee0fc,
|
||||
0xf0460000,
|
||||
0x7e6eb226,
|
||||
0xb2000004,
|
||||
0x0487fdd8,
|
||||
0x80f960f9,
|
||||
0xe0fcd0fc,
|
||||
0x00002d7e,
|
||||
0x26f067f1,
|
||||
0x047e6eb2,
|
||||
0xd8b20000,
|
||||
0xf90487fd,
|
||||
0xfc80f960,
|
||||
0x7ee0fcd0,
|
||||
0x0600002d,
|
||||
0x07e04004,
|
||||
0xbd0006f6,
|
||||
/* 0x04b9: memx_func_enter_wait */
|
||||
0x07c04604,
|
||||
0xf00066cf,
|
||||
0x0bf40464,
|
||||
0xcf2c06f7,
|
||||
0x06b50066,
|
||||
/* 0x04cf: memx_func_leave */
|
||||
0x0600f8f1,
|
||||
0x0066cf2c,
|
||||
0x06f206b5,
|
||||
0x07e44004,
|
||||
0xbd0006f6,
|
||||
/* 0x04e1: memx_func_leave_wait */
|
||||
0x07c04604,
|
||||
0xf00066cf,
|
||||
0x1bf40464,
|
||||
0xf067f1f7,
|
||||
0xe0400406,
|
||||
0x0006f607,
|
||||
/* 0x04b6: memx_func_enter_wait */
|
||||
0xc04604bd,
|
||||
0x0066cf07,
|
||||
0xf40464f0,
|
||||
0x2c06f70b,
|
||||
0xb50066cf,
|
||||
0x00f8f106,
|
||||
/* 0x04cc: memx_func_leave */
|
||||
0x66cf2c06,
|
||||
0xf206b500,
|
||||
0xe4400406,
|
||||
0x0006f607,
|
||||
/* 0x04de: memx_func_leave_wait */
|
||||
0xc04604bd,
|
||||
0x0066cf07,
|
||||
0xf40464f0,
|
||||
0xf046f71b,
|
||||
0xb2010726,
|
||||
0x00047e6e,
|
||||
0xfdd8b200,
|
||||
0x60f90587,
|
||||
0xd0fc80f9,
|
||||
0x2d7ee0fc,
|
||||
0x67f10000,
|
||||
0x6eb21620,
|
||||
0x0000047e,
|
||||
0x87fdd8b2,
|
||||
0xf960f905,
|
||||
0xfcd0fc80,
|
||||
0x002d7ee0,
|
||||
0x0aa24700,
|
||||
0x047e6eb2,
|
||||
0xd8b20000,
|
||||
0xf90587fd,
|
||||
0xfc80f960,
|
||||
0x7ee0fcd0,
|
||||
0xf800002d,
|
||||
/* 0x0541: memx_func_wait_vblank */
|
||||
0x0410b600,
|
||||
/* 0x0546: memx_func_wr32 */
|
||||
0x169800f8,
|
||||
0x01159800,
|
||||
0xf90810b6,
|
||||
0xfc50f960,
|
||||
0x7ee0fcd0,
|
||||
0xb600002d,
|
||||
0x1bf40242,
|
||||
/* 0x0563: memx_func_wait */
|
||||
0x0800f8e8,
|
||||
0x0088cf2c,
|
||||
0x98001e98,
|
||||
0x1c98011d,
|
||||
0x031b9802,
|
||||
0x7e1010b6,
|
||||
0xf8000074,
|
||||
/* 0x057d: memx_func_delay */
|
||||
0x001e9800,
|
||||
0x7e0410b6,
|
||||
0xf8000058,
|
||||
/* 0x0589: memx_func_train */
|
||||
/* 0x058b: memx_exec */
|
||||
0xf900f800,
|
||||
0xb2d0f9e0,
|
||||
/* 0x0593: memx_exec_next */
|
||||
0x98b2b2c1,
|
||||
0x10b60013,
|
||||
0xf034e704,
|
||||
0xe033e701,
|
||||
0x0132b601,
|
||||
0x980c30f0,
|
||||
0x55f9de35,
|
||||
0x1ef412a6,
|
||||
0xf10b98e5,
|
||||
0xbbf20c98,
|
||||
0xc44b02cb,
|
||||
0x00bbcf07,
|
||||
0x20460000,
|
||||
0x7e6eb216,
|
||||
0xb2000004,
|
||||
0x0587fdd8,
|
||||
0x80f960f9,
|
||||
0xe0fcd0fc,
|
||||
0x00029f7e,
|
||||
/* 0x05ca: memx_info */
|
||||
0xc67000f8,
|
||||
0x0c0bf401,
|
||||
/* 0x05d0: memx_info_data */
|
||||
0x4b03cc4c,
|
||||
0x0ef40800,
|
||||
/* 0x05d9: memx_info_train */
|
||||
0x0bcc4c09,
|
||||
/* 0x05df: memx_info_send */
|
||||
0x7e01004b,
|
||||
0xf800029f,
|
||||
/* 0x05e5: memx_recv */
|
||||
0x01d6b000,
|
||||
0xb0a30bf4,
|
||||
0x0bf400d6,
|
||||
/* 0x05f3: memx_init */
|
||||
0xf800f8dc,
|
||||
/* 0x05f5: perf_recv */
|
||||
/* 0x05f7: perf_init */
|
||||
0xf800f800,
|
||||
/* 0x05f9: i2c_drive_scl */
|
||||
0x0036b000,
|
||||
0x400d0bf4,
|
||||
0x01f607e0,
|
||||
0xf804bd00,
|
||||
/* 0x0609: i2c_drive_scl_lo */
|
||||
0x07e44000,
|
||||
0xbd0001f6,
|
||||
/* 0x0613: i2c_drive_sda */
|
||||
0xb000f804,
|
||||
0x0bf40036,
|
||||
0x07e0400d,
|
||||
0xbd0002f6,
|
||||
/* 0x0623: i2c_drive_sda_lo */
|
||||
0x4000f804,
|
||||
0x02f607e4,
|
||||
0xf804bd00,
|
||||
/* 0x062d: i2c_sense_scl */
|
||||
0x0132f400,
|
||||
0xcf07c443,
|
||||
0x31fd0033,
|
||||
0x060bf404,
|
||||
/* 0x063f: i2c_sense_scl_done */
|
||||
0xf80131f4,
|
||||
/* 0x0641: i2c_sense_sda */
|
||||
0x0132f400,
|
||||
0xcf07c443,
|
||||
0x32fd0033,
|
||||
0x060bf404,
|
||||
/* 0x0653: i2c_sense_sda_done */
|
||||
0xf80131f4,
|
||||
/* 0x0655: i2c_raise_scl */
|
||||
0x4440f900,
|
||||
0x01030898,
|
||||
0x0005f97e,
|
||||
/* 0x0660: i2c_raise_scl_wait */
|
||||
0x7e03e84e,
|
||||
0x7e000058,
|
||||
0xf400062d,
|
||||
0x42b60901,
|
||||
0xef1bf401,
|
||||
/* 0x0674: i2c_raise_scl_done */
|
||||
0x00f840fc,
|
||||
/* 0x0678: i2c_start */
|
||||
0x00062d7e,
|
||||
0x7e0d11f4,
|
||||
0xf4000641,
|
||||
0x0ef40611,
|
||||
/* 0x0689: i2c_start_rep */
|
||||
0x7e00032e,
|
||||
0x030005f9,
|
||||
0x06137e01,
|
||||
0x0076bb00,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x557e50fc,
|
||||
0x64b60006,
|
||||
0x1d11f404,
|
||||
/* 0x06b4: i2c_start_send */
|
||||
0x137e0003,
|
||||
0x884e0006,
|
||||
0x00587e13,
|
||||
0x7e000300,
|
||||
0x4e0005f9,
|
||||
0x587e1388,
|
||||
/* 0x06ce: i2c_start_out */
|
||||
0x00002d7e,
|
||||
0xb20aa247,
|
||||
0x00047e6e,
|
||||
0xfdd8b200,
|
||||
0x60f90587,
|
||||
0xd0fc80f9,
|
||||
0x2d7ee0fc,
|
||||
0x00f80000,
|
||||
/* 0x06d0: i2c_stop */
|
||||
0xf97e0003,
|
||||
0x00030005,
|
||||
0x0006137e,
|
||||
0x7e03e84e,
|
||||
0x03000058,
|
||||
0x05f97e01,
|
||||
/* 0x053c: memx_func_wait_vblank */
|
||||
0xf80410b6,
|
||||
/* 0x0541: memx_func_wr32 */
|
||||
0x00169800,
|
||||
0xb6011598,
|
||||
0x60f90810,
|
||||
0xd0fc50f9,
|
||||
0x2d7ee0fc,
|
||||
0x42b60000,
|
||||
0xe81bf402,
|
||||
/* 0x055e: memx_func_wait */
|
||||
0x2c0800f8,
|
||||
0x980088cf,
|
||||
0x1d98001e,
|
||||
0x021c9801,
|
||||
0xb6031b98,
|
||||
0x747e1010,
|
||||
0x00f80000,
|
||||
/* 0x0578: memx_func_delay */
|
||||
0xb6001e98,
|
||||
0x587e0410,
|
||||
0x00f80000,
|
||||
/* 0x0584: memx_func_train */
|
||||
/* 0x0586: memx_exec */
|
||||
0xe0f900f8,
|
||||
0xc1b2d0f9,
|
||||
/* 0x058e: memx_exec_next */
|
||||
0x1398b2b2,
|
||||
0x0410b600,
|
||||
0x01f034e7,
|
||||
0x01e033e7,
|
||||
0xf00132b6,
|
||||
0x35980c30,
|
||||
0xa655f9de,
|
||||
0xe51ef412,
|
||||
0x98f10b98,
|
||||
0xcbbbf20c,
|
||||
0x07c44b02,
|
||||
0xfc00bbcf,
|
||||
0x7ee0fcd0,
|
||||
0xf800029f,
|
||||
/* 0x05c5: memx_info */
|
||||
0x01c67000,
|
||||
/* 0x05cb: memx_info_data */
|
||||
0x4c0c0bf4,
|
||||
0x004b03cc,
|
||||
0x090ef408,
|
||||
/* 0x05d4: memx_info_train */
|
||||
0x4b0bcc4c,
|
||||
/* 0x05da: memx_info_send */
|
||||
0x9f7e0100,
|
||||
0x00f80002,
|
||||
/* 0x05e0: memx_recv */
|
||||
0xf401d6b0,
|
||||
0xd6b0a30b,
|
||||
0xdc0bf400,
|
||||
/* 0x05ee: memx_init */
|
||||
0x00f800f8,
|
||||
/* 0x05f0: perf_recv */
|
||||
/* 0x05f2: perf_init */
|
||||
0x00f800f8,
|
||||
/* 0x05f4: i2c_drive_scl */
|
||||
0xf40036b0,
|
||||
0xe0400d0b,
|
||||
0x0001f607,
|
||||
0x00f804bd,
|
||||
/* 0x0604: i2c_drive_scl_lo */
|
||||
0xf607e440,
|
||||
0x04bd0001,
|
||||
/* 0x060e: i2c_drive_sda */
|
||||
0x36b000f8,
|
||||
0x0d0bf400,
|
||||
0xf607e040,
|
||||
0x04bd0002,
|
||||
/* 0x061e: i2c_drive_sda_lo */
|
||||
0xe44000f8,
|
||||
0x0002f607,
|
||||
0x00f804bd,
|
||||
/* 0x0628: i2c_sense_scl */
|
||||
0x430132f4,
|
||||
0x33cf07c4,
|
||||
0x0431fd00,
|
||||
0xf4060bf4,
|
||||
/* 0x063a: i2c_sense_scl_done */
|
||||
0x00f80131,
|
||||
/* 0x063c: i2c_sense_sda */
|
||||
0x430132f4,
|
||||
0x33cf07c4,
|
||||
0x0432fd00,
|
||||
0xf4060bf4,
|
||||
/* 0x064e: i2c_sense_sda_done */
|
||||
0x00f80131,
|
||||
/* 0x0650: i2c_raise_scl */
|
||||
0x984440f9,
|
||||
0x7e010308,
|
||||
/* 0x065b: i2c_raise_scl_wait */
|
||||
0x4e0005f4,
|
||||
0x587e03e8,
|
||||
0x287e0000,
|
||||
0x01f40006,
|
||||
0x0142b609,
|
||||
/* 0x066f: i2c_raise_scl_done */
|
||||
0xfcef1bf4,
|
||||
/* 0x0673: i2c_start */
|
||||
0x7e00f840,
|
||||
0xf4000628,
|
||||
0x3c7e0d11,
|
||||
0x11f40006,
|
||||
0x2e0ef406,
|
||||
/* 0x0684: i2c_start_rep */
|
||||
0xf47e0003,
|
||||
0x01030005,
|
||||
0x00060e7e,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x06507e50,
|
||||
0x0464b600,
|
||||
/* 0x06af: i2c_start_send */
|
||||
0x031d11f4,
|
||||
0x060e7e00,
|
||||
0x13884e00,
|
||||
0x0000587e,
|
||||
0x137e0103,
|
||||
0x884e0006,
|
||||
0x00587e13,
|
||||
/* 0x06ff: i2c_bitw */
|
||||
0x7e00f800,
|
||||
0x4e000613,
|
||||
0x587e03e8,
|
||||
0x76bb0000,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0x7e50fc04,
|
||||
0xb6000655,
|
||||
0x11f40464,
|
||||
0x13884e17,
|
||||
0x0000587e,
|
||||
0xf97e0003,
|
||||
0xf47e0003,
|
||||
0x884e0005,
|
||||
0x00587e13,
|
||||
/* 0x073d: i2c_bitw_out */
|
||||
/* 0x073f: i2c_bitr */
|
||||
/* 0x06c9: i2c_start_out */
|
||||
/* 0x06cb: i2c_stop */
|
||||
0x0300f800,
|
||||
0x06137e01,
|
||||
0x03e84e00,
|
||||
0x0000587e,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x06557e50,
|
||||
0x0464b600,
|
||||
0x7e1a11f4,
|
||||
0x03000641,
|
||||
0x05f97e00,
|
||||
0x05f47e00,
|
||||
0x7e000300,
|
||||
0x4e00060e,
|
||||
0x587e03e8,
|
||||
0x01030000,
|
||||
0x0005f47e,
|
||||
0x7e13884e,
|
||||
0x03000058,
|
||||
0x060e7e01,
|
||||
0x13884e00,
|
||||
0x0000587e,
|
||||
0xf4013cf0,
|
||||
/* 0x0782: i2c_bitr_done */
|
||||
0x00f80131,
|
||||
/* 0x0784: i2c_get_byte */
|
||||
0x08040005,
|
||||
/* 0x0788: i2c_get_byte_next */
|
||||
0xbb0154b6,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x00073f7e,
|
||||
0xf40464b6,
|
||||
0x53fd2a11,
|
||||
0x0142b605,
|
||||
0x03d81bf4,
|
||||
0x0076bb01,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0xff7e50fc,
|
||||
0x64b60006,
|
||||
/* 0x07d1: i2c_get_byte_done */
|
||||
/* 0x07d3: i2c_put_byte */
|
||||
0x0400f804,
|
||||
/* 0x07d5: i2c_put_byte_next */
|
||||
0x0142b608,
|
||||
0xbb3854ff,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x0006ff7e,
|
||||
0xf40464b6,
|
||||
0x46b03411,
|
||||
0xd81bf400,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x073f7e50,
|
||||
0x0464b600,
|
||||
0xbb0f11f4,
|
||||
0x36b00076,
|
||||
0x061bf401,
|
||||
/* 0x082b: i2c_put_byte_done */
|
||||
0xf80132f4,
|
||||
/* 0x082d: i2c_addr */
|
||||
/* 0x06fa: i2c_bitw */
|
||||
0x0e7e00f8,
|
||||
0xe84e0006,
|
||||
0x00587e03,
|
||||
0x0076bb00,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x787e50fc,
|
||||
0x507e50fc,
|
||||
0x64b60006,
|
||||
0x2911f404,
|
||||
0x012ec3e7,
|
||||
0xfd0134b6,
|
||||
0x76bb0553,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0x7e50fc04,
|
||||
0xb60007d3,
|
||||
/* 0x0872: i2c_addr_done */
|
||||
0x00f80464,
|
||||
/* 0x0874: i2c_acquire_addr */
|
||||
0xb6f8cec7,
|
||||
0xe0b705e4,
|
||||
0x00f8d014,
|
||||
/* 0x0880: i2c_acquire */
|
||||
0x0008747e,
|
||||
0x0000047e,
|
||||
0x7e03d9f0,
|
||||
0xf800002d,
|
||||
/* 0x0891: i2c_release */
|
||||
0x08747e00,
|
||||
0x00047e00,
|
||||
0x03daf000,
|
||||
0x00002d7e,
|
||||
/* 0x08a2: i2c_recv */
|
||||
0x32f400f8,
|
||||
0xf8c1c701,
|
||||
0xb00214b6,
|
||||
0x1ff52816,
|
||||
0x13b80134,
|
||||
0x98000cf4,
|
||||
0x13b80032,
|
||||
0x98000ccc,
|
||||
0x31f40031,
|
||||
0xf9d0f902,
|
||||
0xd6d0f9e0,
|
||||
0x10000000,
|
||||
0xbb016792,
|
||||
0x1711f404,
|
||||
0x7e13884e,
|
||||
0x03000058,
|
||||
0x05f47e00,
|
||||
0x13884e00,
|
||||
0x0000587e,
|
||||
/* 0x0738: i2c_bitw_out */
|
||||
/* 0x073a: i2c_bitr */
|
||||
0x010300f8,
|
||||
0x00060e7e,
|
||||
0x7e03e84e,
|
||||
0xbb000058,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x0008807e,
|
||||
0xfc0464b6,
|
||||
0x00d6b0d0,
|
||||
0x00b01bf5,
|
||||
0x76bb0005,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0x7e50fc04,
|
||||
0xb600082d,
|
||||
0x11f50464,
|
||||
0xc5c700cc,
|
||||
0x0076bbe0,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0xd37e50fc,
|
||||
0x64b60007,
|
||||
0xa911f504,
|
||||
0xbb010500,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x00082d7e,
|
||||
0xf50464b6,
|
||||
0xbb008711,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x0007847e,
|
||||
0x0006507e,
|
||||
0xf40464b6,
|
||||
0x5bcb6711,
|
||||
0x0076bbe0,
|
||||
0x3c7e1a11,
|
||||
0x00030006,
|
||||
0x0005f47e,
|
||||
0x7e13884e,
|
||||
0xf0000058,
|
||||
0x31f4013c,
|
||||
/* 0x077d: i2c_bitr_done */
|
||||
/* 0x077f: i2c_get_byte */
|
||||
0x0500f801,
|
||||
/* 0x0783: i2c_get_byte_next */
|
||||
0xb6080400,
|
||||
0x76bb0154,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0x7e50fc04,
|
||||
0xb600073a,
|
||||
0x11f40464,
|
||||
0x0553fd2a,
|
||||
0xf40142b6,
|
||||
0x0103d81b,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x06fa7e50,
|
||||
0x0464b600,
|
||||
/* 0x07cc: i2c_get_byte_done */
|
||||
/* 0x07ce: i2c_put_byte */
|
||||
0x080400f8,
|
||||
/* 0x07d0: i2c_put_byte_next */
|
||||
0xff0142b6,
|
||||
0x76bb3854,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0x7e50fc04,
|
||||
0xb60006fa,
|
||||
0x11f40464,
|
||||
0x0046b034,
|
||||
0xbbd81bf4,
|
||||
0x65b60076,
|
||||
0x9450f904,
|
||||
0x56bb0465,
|
||||
0xfd50bd02,
|
||||
0x50fc0475,
|
||||
0x00073a7e,
|
||||
0xf40464b6,
|
||||
0x76bb0f11,
|
||||
0x0136b000,
|
||||
0xf4061bf4,
|
||||
/* 0x0826: i2c_put_byte_done */
|
||||
0x00f80132,
|
||||
/* 0x0828: i2c_addr */
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x06737e50,
|
||||
0x0464b600,
|
||||
0xe72911f4,
|
||||
0xb6012ec3,
|
||||
0x53fd0134,
|
||||
0x0076bb05,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0xd07e50fc,
|
||||
0x64b60006,
|
||||
0xbd5bb204,
|
||||
0x410ef474,
|
||||
/* 0x09a4: i2c_recv_not_rd08 */
|
||||
0xf401d6b0,
|
||||
0x00053b1b,
|
||||
0x00082d7e,
|
||||
0xc73211f4,
|
||||
0xd37ee0c5,
|
||||
0x11f40007,
|
||||
0x7e000528,
|
||||
0xf400082d,
|
||||
0xb5c71f11,
|
||||
0x07d37ee0,
|
||||
0x1511f400,
|
||||
0x0006d07e,
|
||||
0xc5c774bd,
|
||||
0x091bf408,
|
||||
0xf40232f4,
|
||||
/* 0x09e2: i2c_recv_not_wr08 */
|
||||
/* 0x09e2: i2c_recv_done */
|
||||
0xcec7030e,
|
||||
0x08917ef8,
|
||||
0xfce0fc00,
|
||||
0x0912f4d0,
|
||||
0x9f7e7cb2,
|
||||
/* 0x09f6: i2c_recv_exit */
|
||||
0x00f80002,
|
||||
/* 0x09f8: i2c_init */
|
||||
/* 0x09fa: test_recv */
|
||||
0x584100f8,
|
||||
0x0011cf04,
|
||||
0x400110b6,
|
||||
0x01f60458,
|
||||
0xde04bd00,
|
||||
0x134fd900,
|
||||
0x0001de7e,
|
||||
/* 0x0a16: test_init */
|
||||
0x004e00f8,
|
||||
0x01de7e08,
|
||||
/* 0x0a1f: idle_recv */
|
||||
0xce7e50fc,
|
||||
0x64b60007,
|
||||
/* 0x086d: i2c_addr_done */
|
||||
/* 0x086f: i2c_acquire_addr */
|
||||
0xc700f804,
|
||||
0xe4b6f8ce,
|
||||
0x14e0b705,
|
||||
/* 0x087b: i2c_acquire */
|
||||
0x7e00f8d0,
|
||||
0x7e00086f,
|
||||
0xf0000004,
|
||||
0x2d7e03d9,
|
||||
0x00f80000,
|
||||
/* 0x088c: i2c_release */
|
||||
0x00086f7e,
|
||||
0x0000047e,
|
||||
0x7e03daf0,
|
||||
0xf800002d,
|
||||
/* 0x089d: i2c_recv */
|
||||
0x0132f400,
|
||||
0xb6f8c1c7,
|
||||
0x16b00214,
|
||||
0x341ff528,
|
||||
0xf413b801,
|
||||
0x3298000c,
|
||||
0xcc13b800,
|
||||
0x3198000c,
|
||||
0x0231f400,
|
||||
0xe0f9d0f9,
|
||||
0x00d6d0f9,
|
||||
0x92100000,
|
||||
0x76bb0167,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0x7e50fc04,
|
||||
0xb600087b,
|
||||
0xd0fc0464,
|
||||
0xf500d6b0,
|
||||
0x0500b01b,
|
||||
0x0076bb00,
|
||||
0xf90465b6,
|
||||
0x04659450,
|
||||
0xbd0256bb,
|
||||
0x0475fd50,
|
||||
0x287e50fc,
|
||||
0x64b60008,
|
||||
0xcc11f504,
|
||||
0xe0c5c700,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x07ce7e50,
|
||||
0x0464b600,
|
||||
0x00a911f5,
|
||||
0x76bb0105,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0x7e50fc04,
|
||||
0xb6000828,
|
||||
0x11f50464,
|
||||
0x76bb0087,
|
||||
0x0465b600,
|
||||
0x659450f9,
|
||||
0x0256bb04,
|
||||
0x75fd50bd,
|
||||
0x7e50fc04,
|
||||
0xb600077f,
|
||||
0x11f40464,
|
||||
0xe05bcb67,
|
||||
0xb60076bb,
|
||||
0x50f90465,
|
||||
0xbb046594,
|
||||
0x50bd0256,
|
||||
0xfc0475fd,
|
||||
0x06cb7e50,
|
||||
0x0464b600,
|
||||
0x74bd5bb2,
|
||||
/* 0x099f: i2c_recv_not_rd08 */
|
||||
0xb0410ef4,
|
||||
0x1bf401d6,
|
||||
0x7e00053b,
|
||||
0xf4000828,
|
||||
0xc5c73211,
|
||||
0x07ce7ee0,
|
||||
0x2811f400,
|
||||
0x287e0005,
|
||||
0x11f40008,
|
||||
0xe0b5c71f,
|
||||
0x0007ce7e,
|
||||
0x7e1511f4,
|
||||
0xbd0006cb,
|
||||
0x08c5c774,
|
||||
0xf4091bf4,
|
||||
0x0ef40232,
|
||||
/* 0x09dd: i2c_recv_not_wr08 */
|
||||
/* 0x09dd: i2c_recv_done */
|
||||
0xf8cec703,
|
||||
0x00088c7e,
|
||||
0xd0fce0fc,
|
||||
0xb20912f4,
|
||||
0x029f7e7c,
|
||||
/* 0x09f1: i2c_recv_exit */
|
||||
/* 0x09f3: i2c_init */
|
||||
0xf800f800,
|
||||
/* 0x0a21: idle */
|
||||
0x0031f400,
|
||||
0xcf045441,
|
||||
0x10b60011,
|
||||
0x04544001,
|
||||
0xbd0001f6,
|
||||
/* 0x0a35: idle_loop */
|
||||
0xf4580104,
|
||||
/* 0x0a3a: idle_proc */
|
||||
/* 0x0a3a: idle_proc_exec */
|
||||
0x10f90232,
|
||||
0xa87e1eb2,
|
||||
0x10fc0002,
|
||||
0xf40911f4,
|
||||
0x0ef40231,
|
||||
/* 0x0a4d: idle_proc_next */
|
||||
0x5810b6f0,
|
||||
0x1bf41fa6,
|
||||
0xe002f4e8,
|
||||
0xf40028f4,
|
||||
0x0000c60e,
|
||||
/* 0x09f5: test_recv */
|
||||
0x04584100,
|
||||
0xb60011cf,
|
||||
0x58400110,
|
||||
0x0001f604,
|
||||
0x00de04bd,
|
||||
0x7e134fd9,
|
||||
0xf80001de,
|
||||
/* 0x0a11: test_init */
|
||||
0x08004e00,
|
||||
0x0001de7e,
|
||||
/* 0x0a1a: idle_recv */
|
||||
0x00f800f8,
|
||||
/* 0x0a1c: idle */
|
||||
0x410031f4,
|
||||
0x11cf0454,
|
||||
0x0110b600,
|
||||
0xf6045440,
|
||||
0x04bd0001,
|
||||
/* 0x0a30: idle_loop */
|
||||
0x32f45801,
|
||||
/* 0x0a35: idle_proc */
|
||||
/* 0x0a35: idle_proc_exec */
|
||||
0xb210f902,
|
||||
0x02a87e1e,
|
||||
0xf410fc00,
|
||||
0x31f40911,
|
||||
0xf00ef402,
|
||||
/* 0x0a48: idle_proc_next */
|
||||
0xa65810b6,
|
||||
0xe81bf41f,
|
||||
0xf4e002f4,
|
||||
0x0ef40028,
|
||||
0x000000c6,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
|
File diff suppressed because it is too large
@@ -82,15 +82,15 @@ memx_train_tail:
 // $r0 - zero
 memx_func_enter:
 #if NVKM_PPWR_CHIPSET == GT215
-	movw $r8 0x1610
+	mov $r8 0x1610
 	nv_rd32($r7, $r8)
 	imm32($r6, 0xfffffffc)
 	and $r7 $r6
-	movw $r6 0x2
+	mov $r6 0x2
 	or $r7 $r6
 	nv_wr32($r8, $r7)
 #else
-	movw $r6 0x001620
+	mov $r6 0x001620
 	imm32($r7, ~0x00000aa2);
 	nv_rd32($r8, $r6)
 	and $r8 $r7
@@ -101,7 +101,7 @@ memx_func_enter:
 	and $r8 $r7
 	nv_wr32($r6, $r8)
 
-	movw $r6 0x0026f0
+	mov $r6 0x0026f0
 	nv_rd32($r8, $r6)
 	and $r8 $r7
 	nv_wr32($r6, $r8)
@@ -136,19 +136,19 @@ memx_func_leave:
 	bra nz #memx_func_leave_wait
 
 #if NVKM_PPWR_CHIPSET == GT215
-	movw $r8 0x1610
+	mov $r8 0x1610
 	nv_rd32($r7, $r8)
 	imm32($r6, 0xffffffcc)
 	and $r7 $r6
 	nv_wr32($r8, $r7)
 #else
-	movw $r6 0x0026f0
+	mov $r6 0x0026f0
 	imm32($r7, 0x00000001)
 	nv_rd32($r8, $r6)
 	or $r8 $r7
 	nv_wr32($r6, $r8)
 
-	movw $r6 0x001620
+	mov $r6 0x001620
 	nv_rd32($r8, $r6)
 	or $r8 $r7
 	nv_wr32($r6, $r8)
@@ -177,11 +177,11 @@ memx_func_wait_vblank:
 	bra #memx_func_wait_vblank_fini
 
 memx_func_wait_vblank_head1:
-	movw $r7 0x20
+	mov $r7 0x20
 	bra #memx_func_wait_vblank_0
 
 memx_func_wait_vblank_head0:
-	movw $r7 0x8
+	mov $r7 0x8
 
 memx_func_wait_vblank_0:
 	nv_iord($r6, NV_PPWR_INPUT)
@@ -273,13 +273,13 @@ memx_func_train:
 // $r5 - outer loop counter
 // $r6 - inner loop counter
 // $r7 - entry counter (#memx_train_head + $r7)
-	movw $r5 0x3
-	movw $r7 0x0
+	mov $r5 0x3
+	mov $r7 0x0
 
 // Read random memory to wake up... things
 	imm32($r9, 0x700000)
 	nv_rd32($r8,$r9)
-	movw $r14 0x2710
+	mov $r14 0x2710
 	call(nsec)
 
 	memx_func_train_loop_outer:
@@ -289,9 +289,9 @@ memx_func_train:
 		nv_wr32($r9, $r8)
 		push $r5
 
-		movw $r6 0x0
+		mov $r6 0x0
 	memx_func_train_loop_inner:
-		movw $r8 0x1111
+		mov $r8 0x1111
 		mulu $r9 $r6 $r8
 		shl b32 $r8 $r9 0x10
 		or $r8 $r9
@@ -315,7 +315,7 @@ memx_func_train:
 
 // $r5 - inner inner loop counter
 // $r9 - result
-		movw $r5 0
+		mov $r5 0
 		imm32($r9, 0x8300ffff)
 	memx_func_train_loop_4x:
 		imm32($r10, 0x100080)
|
|
@@ -7,8 +7,10 @@ nvkm-y += nvkm/subdev/secboot/acr_r352.o
 nvkm-y += nvkm/subdev/secboot/acr_r361.o
 nvkm-y += nvkm/subdev/secboot/acr_r364.o
 nvkm-y += nvkm/subdev/secboot/acr_r367.o
+nvkm-y += nvkm/subdev/secboot/acr_r370.o
 nvkm-y += nvkm/subdev/secboot/acr_r375.o
 nvkm-y += nvkm/subdev/secboot/gm200.o
 nvkm-y += nvkm/subdev/secboot/gm20b.o
 nvkm-y += nvkm/subdev/secboot/gp102.o
+nvkm-y += nvkm/subdev/secboot/gp108.o
 nvkm-y += nvkm/subdev/secboot/gp10b.o
|
|
@@ -64,6 +64,7 @@ struct nvkm_acr *acr_r352_new(unsigned long);
 struct nvkm_acr *acr_r361_new(unsigned long);
 struct nvkm_acr *acr_r364_new(unsigned long);
 struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
+struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
 struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
 
 #endif
|
|
@@ -0,0 +1,144 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r370.h"
#include "acr_r367.h"

#include <core/msgqueue.h>
#include <engine/falcon.h>
#include <engine/sec2.h>

static void
acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	struct acr_r370_flcn_bl_desc *desc = _desc;
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	u64 base, addr_code, addr_data;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = base + pdesc->app_resident_code_offset;
	addr_data = base + pdesc->app_resident_data_offset;

	desc->ctx_dma = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = u64_to_flcn64(addr_code);
	desc->non_sec_code_off = pdesc->app_resident_code_offset;
	desc->non_sec_code_size = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = u64_to_flcn64(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
}

const struct acr_r352_ls_func
acr_r370_ls_fecs_func = {
	.load = acr_ls_ucode_load_fecs,
	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r370_ls_gpccs_func = {
	.load = acr_ls_ucode_load_gpccs,
	.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
	/* GPCCS will be loaded using PRI */
	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};

static void
acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
	struct acr_r370_flcn_bl_desc *desc = _desc;
	u64 base, addr_code, addr_data;
	u32 addr_args;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	/* For some reason we should not add app_resident_code_offset here */
	addr_code = base;
	addr_data = base + pdesc->app_resident_data_offset;
	addr_args = sec->falcon->data.limit;
	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

	desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
	desc->code_dma_base = u64_to_flcn64(addr_code);
	desc->non_sec_code_off = pdesc->app_resident_code_offset;
	desc->non_sec_code_size = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = u64_to_flcn64(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
	desc->argc = 1;
	/* args are stored at the beginning of EMEM */
	desc->argv = 0x01000000;
}

const struct acr_r352_ls_func
acr_r370_ls_sec2_func = {
	.load = acr_ls_ucode_load_sec2,
	.generate_bl_desc = acr_r370_generate_sec2_bl_desc,
	.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
	.post_run = acr_ls_sec2_post_run,
};

void
acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
			     u64 offset)
{
	struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;

	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
	bl_desc->code_entry_point = 0;
	bl_desc->code_dma_base = u64_to_flcn64(offset);
	bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
	bl_desc->data_size = hdr->data_size;
}

const struct acr_r352_func
acr_r370_func = {
	.fixup_hs_desc = acr_r367_fixup_hs_desc,
	.generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
	.hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
	.shadow_blob = true,
	.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
	.ls_fill_headers = acr_r367_ls_fill_headers,
	.ls_write_wpr = acr_r367_ls_write_wpr,
	.ls_func = {
		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
		[NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
	},
};

struct nvkm_acr *
acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
	     unsigned long managed_falcons)
{
	return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
}
@@ -0,0 +1,49 @@
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVKM_SECBOOT_ACR_R370_H__
#define __NVKM_SECBOOT_ACR_R370_H__

#include "priv.h"
struct hsf_load_header;

/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
struct acr_r370_flcn_bl_desc {
        u32 reserved[4];
        u32 signature[4];
        u32 ctx_dma;
        struct flcn_u64 code_dma_base;
        u32 non_sec_code_off;
        u32 non_sec_code_size;
        u32 sec_code_off;
        u32 sec_code_size;
        u32 code_entry_point;
        struct flcn_u64 data_dma_base;
        u32 data_size;
        u32 argc;
        u32 argv;
};

void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
#endif
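
Note on the descriptor layout: the DMA addresses in these bootloader descriptors travel as struct flcn_u64, the lo/hi pair of 32-bit words declared in the secboot priv.h hunk further below. The u64_to_flcn64() helper itself is not part of this excerpt, so the standalone sketch that follows only illustrates the split it presumably performs; it is not the driver's code.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the lo/hi layout of struct flcn_u64 shown in the secboot priv.h hunk. */
struct flcn_u64 {
        uint32_t lo;
        uint32_t hi;
};

/* Assumed behaviour of a u64_to_flcn64()-style helper: split a 64-bit address. */
static struct flcn_u64 u64_to_flcn64(uint64_t u)
{
        struct flcn_u64 ret = { .lo = (uint32_t)u, .hi = (uint32_t)(u >> 32) };
        return ret;
}

int main(void)
{
        struct flcn_u64 f = u64_to_flcn64(0x1234567890ULL);
        printf("lo=0x%08x hi=0x%08x\n", f.lo, f.hi); /* lo=0x34567890 hi=0x00000012 */
        return 0;
}
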
@@ -20,90 +20,12 @@
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r370.h"
#include "acr_r367.h"

#include <engine/falcon.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>

/*
 * r375 ACR: similar to r367, but with a unified bootloader descriptor
 * structure for GR and PMU falcons.
 */

/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
struct acr_r375_flcn_bl_desc {
        u32 reserved[4];
        u32 signature[4];
        u32 ctx_dma;
        struct flcn_u64 code_dma_base;
        u32 non_sec_code_off;
        u32 non_sec_code_size;
        u32 sec_code_off;
        u32 sec_code_size;
        u32 code_entry_point;
        struct flcn_u64 data_dma_base;
        u32 data_size;
        u32 argc;
        u32 argv;
};

static void
acr_r375_generate_flcn_bl_desc(const struct nvkm_acr *acr,
                               const struct ls_ucode_img *img, u64 wpr_addr,
                               void *_desc)
{
        struct acr_r375_flcn_bl_desc *desc = _desc;
        const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
        u64 base, addr_code, addr_data;

        base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
        addr_code = base + pdesc->app_resident_code_offset;
        addr_data = base + pdesc->app_resident_data_offset;

        desc->ctx_dma = FALCON_DMAIDX_UCODE;
        desc->code_dma_base = u64_to_flcn64(addr_code);
        desc->non_sec_code_off = pdesc->app_resident_code_offset;
        desc->non_sec_code_size = pdesc->app_resident_code_size;
        desc->code_entry_point = pdesc->app_imem_entry;
        desc->data_dma_base = u64_to_flcn64(addr_data);
        desc->data_size = pdesc->app_resident_data_size;
}

static void
acr_r375_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
                             u64 offset)
{
        struct acr_r375_flcn_bl_desc *bl_desc = _bl_desc;

        bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
        bl_desc->non_sec_code_off = hdr->non_sec_code_off;
        bl_desc->non_sec_code_size = hdr->non_sec_code_size;
        bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
        bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
        bl_desc->code_entry_point = 0;
        bl_desc->code_dma_base = u64_to_flcn64(offset);
        bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
        bl_desc->data_size = hdr->data_size;
}

const struct acr_r352_ls_func
acr_r375_ls_fecs_func = {
        .load = acr_ls_ucode_load_fecs,
        .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
        .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r375_ls_gpccs_func = {
        .load = acr_ls_ucode_load_gpccs,
        .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
        .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
        /* GPCCS will be loaded using PRI */
        .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};


static void
acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
                              const struct ls_ucode_img *img, u64 wpr_addr,

@@ -111,7 +33,7 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
{
        const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
        const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
        struct acr_r375_flcn_bl_desc *desc = _desc;
        struct acr_r370_flcn_bl_desc *desc = _desc;
        u64 base, addr_code, addr_data;
        u32 addr_args;

@@ -136,23 +58,22 @@ const struct acr_r352_ls_func
acr_r375_ls_pmu_func = {
        .load = acr_ls_ucode_load_pmu,
        .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
        .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
        .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
        .post_run = acr_ls_pmu_post_run,
};


const struct acr_r352_func
acr_r375_func = {
        .fixup_hs_desc = acr_r367_fixup_hs_desc,
        .generate_hs_bl_desc = acr_r375_generate_hs_bl_desc,
        .hs_bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
        .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
        .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
        .shadow_blob = true,
        .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
        .ls_fill_headers = acr_r367_ls_fill_headers,
        .ls_write_wpr = acr_r367_ls_write_wpr,
        .ls_func = {
                [NVKM_SECBOOT_FALCON_FECS] = &acr_r375_ls_fecs_func,
                [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r375_ls_gpccs_func,
                [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
                [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
                [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
        },
};
@@ -133,7 +133,7 @@ gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
        return gm200_secboot_run_blob(sb, blob, falcon);
}

static const struct nvkm_secboot_func
const struct nvkm_secboot_func
gp102_secboot = {
        .dtor = gm200_secboot_dtor,
        .oneinit = gm200_secboot_oneinit,
@@ -0,0 +1,67 @@
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "gm200.h"
#include "acr.h"

int
gp108_secboot_new(struct nvkm_device *device, int index,
                  struct nvkm_secboot **psb)
{
        struct gm200_secboot *gsb;
        struct nvkm_acr *acr;

        acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
                           BIT(NVKM_SECBOOT_FALCON_FECS) |
                           BIT(NVKM_SECBOOT_FALCON_GPCCS) |
                           BIT(NVKM_SECBOOT_FALCON_SEC2));
        if (IS_ERR(acr))
                return PTR_ERR(acr);

        if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
                acr->func->dtor(acr);
                return -ENOMEM;
        }
        *psb = &gsb->base;

        return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
}

MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
@@ -40,6 +40,8 @@ int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
int nvkm_secboot_falcon_run(struct nvkm_secboot *);

extern const struct nvkm_secboot_func gp102_secboot;

struct flcn_u64 {
        u32 lo;
        u32 hi;
@@ -9,7 +9,9 @@ nvkm-y += nvkm/subdev/therm/nv40.o
nvkm-y += nvkm/subdev/therm/nv50.o
nvkm-y += nvkm/subdev/therm/g84.o
nvkm-y += nvkm/subdev/therm/gt215.o
nvkm-y += nvkm/subdev/therm/gf100.o
nvkm-y += nvkm/subdev/therm/gf119.o
nvkm-y += nvkm/subdev/therm/gk104.o
nvkm-y += nvkm/subdev/therm/gm107.o
nvkm-y += nvkm/subdev/therm/gm200.o
nvkm-y += nvkm/subdev/therm/gp100.o
@@ -21,6 +21,7 @@
 *
 * Authors: Martin Peres
 */
#include <nvkm/core/option.h>
#include "priv.h"

int

@@ -297,6 +298,38 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
        return -EINVAL;
}

void
nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
{
        if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
                return;

        nvkm_debug(&therm->subdev,
                   "Enabling clockgating\n");
        therm->func->clkgate_enable(therm);
}

void
nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
{
        if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
                return;

        nvkm_debug(&therm->subdev,
                   "Preparing clockgating for %s\n",
                   suspend ? "suspend" : "fini");
        therm->func->clkgate_fini(therm, suspend);
}

static void
nvkm_therm_clkgate_oneinit(struct nvkm_therm *therm)
{
        if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
                return;

        nvkm_info(&therm->subdev, "Clockgating enabled\n");
}

static void
nvkm_therm_intr(struct nvkm_subdev *subdev)
{

@@ -333,6 +366,7 @@ nvkm_therm_oneinit(struct nvkm_subdev *subdev)
        nvkm_therm_fan_ctor(therm);
        nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
        nvkm_therm_sensor_preinit(therm);
        nvkm_therm_clkgate_oneinit(therm);
        return 0;
}

@@ -357,6 +391,16 @@ nvkm_therm_init(struct nvkm_subdev *subdev)
        return 0;
}

void
nvkm_therm_clkgate_init(struct nvkm_therm *therm,
                        const struct nvkm_therm_clkgate_pack *p)
{
        if (!therm->func->clkgate_init || !therm->clkgating_enabled)
                return;

        therm->func->clkgate_init(therm, p);
}

static void *
nvkm_therm_dtor(struct nvkm_subdev *subdev)
{

@@ -374,15 +418,10 @@ nvkm_therm = {
        .intr = nvkm_therm_intr,
};

int
nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
                int index, struct nvkm_therm **ptherm)
void
nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
                int index, const struct nvkm_therm_func *func)
{
        struct nvkm_therm *therm;

        if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
                return -ENOMEM;

        nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
        therm->func = func;

@@ -395,5 +434,20 @@ nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
        therm->attr_get = nvkm_therm_attr_get;
        therm->attr_set = nvkm_therm_attr_set;
        therm->mode = therm->suspend = -1; /* undefined */

        therm->clkgating_enabled = nvkm_boolopt(device->cfgopt,
                                                "NvPmEnableGating", false);
}

int
nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
                int index, struct nvkm_therm **ptherm)
{
        struct nvkm_therm *therm;

        if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
                return -ENOMEM;

        nvkm_therm_ctor(therm, device, index, func);
        return 0;
}
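
Usage note: none of the wrappers above do anything unless the backend fills in the corresponding clkgate hook and the user has opted in, because nvkm_therm_ctor() reads clkgating_enabled from the NvPmEnableGating boolean in the device config string and defaults it to false. That string normally comes from nouveau's "config" module option, so something along the lines of nouveau.config=NvPmEnableGating=1 on the kernel command line is what turns the feature on; the option plumbing sits outside this diff, so treat that exact invocation as an assumption.
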
@@ -0,0 +1,58 @@
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Lyude Paul
 */
#include <core/device.h>

#include "priv.h"

#define pack_for_each_init(init, pack, head) \
        for (pack = head; pack && pack->init; pack++) \
                for (init = pack->init; init && init->count; init++)
void
gf100_clkgate_init(struct nvkm_therm *therm,
                   const struct nvkm_therm_clkgate_pack *p)
{
        struct nvkm_device *device = therm->subdev.device;
        const struct nvkm_therm_clkgate_pack *pack;
        const struct nvkm_therm_clkgate_init *init;
        u32 next, addr;

        pack_for_each_init(init, pack, p) {
                next = init->addr + init->count * 8;
                addr = init->addr;

                nvkm_trace(&therm->subdev, "{ 0x%06x, %d, 0x%08x }\n",
                           init->addr, init->count, init->data);
                while (addr < next) {
                        nvkm_trace(&therm->subdev, "\t0x%06x = 0x%08x\n",
                                   addr, init->data);
                        nvkm_wr32(device, addr, init->data);
                        addr += 8;
                }
        }
}

/*
 * TODO: Fermi clockgating isn't understood fully yet, so we don't specify any
 * clockgate functions to use
 */
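
To make the table format concrete: every nvkm_therm_clkgate_init entry that gf100_clkgate_init() consumes describes a run of identical register writes, i.e. count writes of data spaced 8 bytes apart starting at addr. The standalone sketch below replays that expansion for one made-up entry; the field layout is inferred from the accesses above and the values are purely illustrative, not taken from any real gating table.

#include <stdint.h>
#include <stdio.h>

/* Field layout inferred from init->addr, init->count and init->data above;
 * the exact nvkm types are an assumption. */
struct clkgate_init {
        uint32_t addr;
        uint8_t  count;
        uint32_t data;
};

int main(void)
{
        /* Illustrative entry only: write 0x45 to four registers from 0x404000. */
        const struct clkgate_init init = { 0x404000, 4, 0x00000045 };
        const uint32_t next = init.addr + init.count * 8;

        for (uint32_t addr = init.addr; addr < next; addr += 8)
                printf("wr32 0x%06x = 0x%08x\n", addr, init.data);
        return 0;
}
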
@@ -0,0 +1,35 @@
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Lyude Paul
 */

#ifndef __GF100_THERM_H__
#define __GF100_THERM_H__

#include <core/device.h>

struct gf100_idle_filter {
        u32 fecs;
        u32 hubmmu;
};

#endif
@@ -49,7 +49,7 @@ pwm_info(struct nvkm_therm *therm, int line)
        return -ENODEV;
}

static int
int
gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
        struct nvkm_device *device = therm->subdev.device;

@@ -63,7 +63,7 @@ gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
        return 0;
}

static int
int
gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
        struct nvkm_device *device = therm->subdev.device;

@@ -85,7 +85,7 @@ gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
        return -EINVAL;
}

static int
int
gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
        struct nvkm_device *device = therm->subdev.device;

@@ -102,7 +102,7 @@ gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
        return 0;
}

static int
int
gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
        struct nvkm_device *device = therm->subdev.device;
@@ -0,0 +1,136 @@
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Lyude Paul
 */
#include <core/device.h>

#include "priv.h"
#include "gk104.h"

void
gk104_clkgate_enable(struct nvkm_therm *base)
{
        struct gk104_therm *therm = gk104_therm(base);
        struct nvkm_device *dev = therm->base.subdev.device;
        const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
        int i;

        /* Program ENG_MANT, ENG_FILTER */
        for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
                if (!nvkm_device_subdev(dev, order[i].engine))
                        continue;

                nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
        }

        /* magic */
        nvkm_wr32(dev, 0x020288, therm->idle_filter->fecs);
        nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);

        /* Enable clockgating (ENG_CLK = RUN->AUTO) */
        for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
                if (!nvkm_device_subdev(dev, order[i].engine))
                        continue;

                nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
        }
}

void
gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
{
        struct gk104_therm *therm = gk104_therm(base);
        struct nvkm_device *dev = therm->base.subdev.device;
        const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
        int i;

        /* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */
        for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
                if (!nvkm_device_subdev(dev, order[i].engine))
                        continue;

                nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
        }
}

const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = {
        { NVKM_ENGINE_GR, 0x00 },
        { NVKM_ENGINE_MSPDEC, 0x04 },
        { NVKM_ENGINE_MSPPP, 0x08 },
        { NVKM_ENGINE_MSVLD, 0x0c },
        { NVKM_ENGINE_CE0, 0x10 },
        { NVKM_ENGINE_CE1, 0x14 },
        { NVKM_ENGINE_MSENC, 0x18 },
        { NVKM_ENGINE_CE2, 0x1c },
        { NVKM_SUBDEV_NR, 0 },
};

const struct gf100_idle_filter gk104_idle_filter = {
        .fecs = 0x00001000,
        .hubmmu = 0x00001000,
};

static const struct nvkm_therm_func
gk104_therm_func = {
        .init = gf119_therm_init,
        .fini = g84_therm_fini,
        .pwm_ctrl = gf119_fan_pwm_ctrl,
        .pwm_get = gf119_fan_pwm_get,
        .pwm_set = gf119_fan_pwm_set,
        .pwm_clock = gf119_fan_pwm_clock,
        .temp_get = g84_temp_get,
        .fan_sense = gt215_therm_fan_sense,
        .program_alarms = nvkm_therm_program_alarms_polling,
        .clkgate_init = gf100_clkgate_init,
        .clkgate_enable = gk104_clkgate_enable,
        .clkgate_fini = gk104_clkgate_fini,
};

static int
gk104_therm_new_(const struct nvkm_therm_func *func,
                 struct nvkm_device *device,
                 int index,
                 const struct gk104_clkgate_engine_info *clkgate_order,
                 const struct gf100_idle_filter *idle_filter,
                 struct nvkm_therm **ptherm)
{
        struct gk104_therm *therm = kzalloc(sizeof(*therm), GFP_KERNEL);

        if (!therm)
                return -ENOMEM;

        nvkm_therm_ctor(&therm->base, device, index, func);
        *ptherm = &therm->base;
        therm->clkgate_order = clkgate_order;
        therm->idle_filter = idle_filter;

        return 0;
}

int
gk104_therm_new(struct nvkm_device *device,
                int index, struct nvkm_therm **ptherm)
{
        return gk104_therm_new_(&gk104_therm_func, device, index,
                                gk104_clkgate_engine_info, &gk104_idle_filter,
                                ptherm);
}
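
The two loops in gk104_clkgate_enable() touch each engine's gate control register twice: the first pass programs the high byte (ENG_MANT/ENG_FILTER), the second flips the low byte so ENG_CLK goes from RUN to AUTO. Assuming nvkm_mask() has the usual read-modify-write meaning (keep the bits outside the mask, substitute the bits inside it), the standalone sketch below shows how the two passes combine on a hypothetical starting value.

#include <stdint.h>
#include <stdio.h>

/* Assumed read-modify-write behaviour of an nvkm_mask()-style helper. */
static uint32_t rmw(uint32_t old, uint32_t mask, uint32_t data)
{
        return (old & ~mask) | data;
}

int main(void)
{
        uint32_t reg = 0x00000100;      /* hypothetical reset value */

        reg = rmw(reg, 0xff00, 0x4500); /* pass 1: ENG_MANT, ENG_FILTER */
        reg = rmw(reg, 0x00ff, 0x0045); /* pass 2: ENG_CLK RUN->AUTO */
        printf("0x%08x\n", reg);        /* -> 0x00004545 */
        return 0;
}
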
@@ -0,0 +1,48 @@
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Lyude Paul
 */

#ifndef __GK104_THERM_H__
#define __GK104_THERM_H__
#define gk104_therm(p) (container_of((p), struct gk104_therm, base))

#include <subdev/therm.h>
#include "priv.h"
#include "gf100.h"

struct gk104_clkgate_engine_info {
        enum nvkm_devidx engine;
        u8 offset;
};

struct gk104_therm {
        struct nvkm_therm base;

        const struct gk104_clkgate_engine_info *clkgate_order;
        const struct gf100_idle_filter *idle_filter;
};

extern const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[];
extern const struct gf100_idle_filter gk104_idle_filter;

#endif
@@ -36,7 +36,7 @@ gt215_therm_fan_sense(struct nvkm_therm *therm)
        return -ENODEV;
}

static void
void
gt215_therm_init(struct nvkm_therm *therm)
{
        struct nvkm_device *device = therm->subdev.device;
@@ -32,6 +32,8 @@

int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
                    int index, struct nvkm_therm **);
void nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
                     int index, const struct nvkm_therm_func *func);

struct nvkm_fan {
        struct nvkm_therm *parent;

@@ -66,8 +68,6 @@ int nvkm_therm_fan_set(struct nvkm_therm *, bool now, int percent);
int nvkm_therm_fan_user_get(struct nvkm_therm *);
int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);

int nvkm_therm_preinit(struct nvkm_therm *);

int nvkm_therm_sensor_init(struct nvkm_therm *);
int nvkm_therm_sensor_fini(struct nvkm_therm *, bool suspend);
void nvkm_therm_sensor_preinit(struct nvkm_therm *);

@@ -96,6 +96,11 @@ struct nvkm_therm_func {
        int (*fan_sense)(struct nvkm_therm *);

        void (*program_alarms)(struct nvkm_therm *);

        void (*clkgate_init)(struct nvkm_therm *,
                             const struct nvkm_therm_clkgate_pack *);
        void (*clkgate_enable)(struct nvkm_therm *);
        void (*clkgate_fini)(struct nvkm_therm *, bool);
};

void nv40_therm_intr(struct nvkm_therm *);

@@ -111,9 +116,21 @@ void g84_therm_fini(struct nvkm_therm *);

int gt215_therm_fan_sense(struct nvkm_therm *);

void gf100_clkgate_init(struct nvkm_therm *,
                        const struct nvkm_therm_clkgate_pack *);

void g84_therm_init(struct nvkm_therm *);

int gf119_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
int gf119_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
int gf119_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
int gf119_fan_pwm_clock(struct nvkm_therm *, int);
void gf119_therm_init(struct nvkm_therm *);

void gk104_therm_init(struct nvkm_therm *);
void gk104_clkgate_enable(struct nvkm_therm *);
void gk104_clkgate_fini(struct nvkm_therm *, bool);

int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
int nvkm_fannil_create(struct nvkm_therm *);
@@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
                /* calc dclk divider with current vco freq */
                dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
                                                         pd_min, pd_even);
                if (vclk_div > pd_max)
                if (dclk_div > pd_max)
                        break; /* vco is too big, it has to stop */

                /* calc score with current vco freq */
@@ -1727,7 +1727,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
        kref_get(&bo->list_kref);

        if (!list_empty(&bo->ddestroy)) {
                ret = ttm_bo_cleanup_refs(bo, false, false, true);
                ret = ttm_bo_cleanup_refs(bo, false, false, locked);
                kref_put(&bo->list_kref, ttm_bo_release_list);
                return ret;
        }
@@ -316,7 +316,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                                 unsigned long offset,
                                 void *buf, int len, int write)
                                 uint8_t *buf, int len, int write)
{
        unsigned long page = offset >> PAGE_SHIFT;
        unsigned long bytes_left = len;

@@ -345,6 +345,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                ttm_bo_kunmap(&map);

                page++;
                buf += bytes;
                bytes_left -= bytes;
                offset = 0;
        } while (bytes_left);
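
The ttm_bo_vm.c hunk above is a straightforward bug fix: the kmap access loop copied the buffer in chunks but never advanced the caller's buffer pointer between chunks, and the switch from void * to uint8_t * makes that pointer arithmetic well defined. A minimal standalone illustration of why the added buf += bytes step matters (the sizes and data are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy len bytes in fixed-size chunks, advancing the destination pointer
 * after every chunk -- the step the TTM fix adds. */
static void chunked_copy(uint8_t *dst, const uint8_t *src, size_t len, size_t chunk)
{
        while (len) {
                size_t bytes = len < chunk ? len : chunk;

                memcpy(dst, src, bytes);
                src += bytes;
                dst += bytes;   /* without this, every chunk lands at dst[0] */
                len -= bytes;
        }
}

int main(void)
{
        const uint8_t src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t dst[8] = { 0 };
        size_t i;

        chunked_copy(dst, src, sizeof(src), 3);
        for (i = 0; i < sizeof(dst); i++)
                printf("%u ", dst[i]);
        printf("\n"); /* 1 2 3 4 5 6 7 8 */
        return 0;
}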