Merge branch 'linux-4.15' of git://github.com/skeggsb/linux into drm-fixes
nouveau regression fixes, and some minor fixes.

* 'linux-4.15' of git://github.com/skeggsb/linux:
  drm/nouveau: use alternate memory type for system-memory buffers with kind != 0
  drm/nouveau: avoid GPU page sizes > PAGE_SIZE for buffer objects in host memory
  drm/nouveau/mmu/gp10b: use correct implementation
  drm/nouveau/pci: do a msi rearm on init
  drm/nouveau/imem/nv50: fix refcount_t warning
  drm/nouveau/bios/dp: support DP Info Table 2.0
  drm/nouveau/fbcon: fix NULL pointer access in nouveau_fbcon_destroy
commit 51b83e1428
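For orientation before the diffs: the "alternate memory type" change splits the TTM host memory types into two slots indexed by !!kind, so system-memory buffers with a non-zero kind get an uncached type of their own. The following is a standalone sketch of just that indexing idea, not kernel code; the struct, helper name, and type values here are made up for illustration:

#include <stdio.h>

/* Illustrative stand-in for the drm->ttm fields changed below: slot 0 is
 * used for kind == 0 buffers, slot 1 for any non-zero kind (via !!kind). */
struct ttm_types {
        int type_host[2];   /* cache-coherent memory types */
        int type_ncoh[2];   /* uncached memory types */
};

/* Mirrors the selection logic in nouveau_mem_host() after the change:
 * coherent mappings only ever use slot 0, uncached ones pick a slot by kind. */
static int pick_type(const struct ttm_types *ttm, unsigned char kind, int coherent)
{
        return coherent ? ttm->type_host[0] : ttm->type_ncoh[!!kind];
}

int main(void)
{
        struct ttm_types ttm = { .type_host = { 3, 5 }, .type_ncoh = { 4, 6 } };
        printf("kind 0x00, uncached -> type %d\n", pick_type(&ttm, 0x00, 0));
        printf("kind 0xfe, uncached -> type %d\n", pick_type(&ttm, 0xfe, 0));
        return 0;
}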
@@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
                 /* Determine if we can get a cache-coherent map, forcing
                  * uncached mapping if we can't.
                  */
-                if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+                if (!nouveau_drm_use_coherent_gpu_mapping(drm))
                         nvbo->force_coherent = true;
         }
 
@@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
                 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
                     (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
                         continue;
-                if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host)
+                if ((flags & TTM_PL_FLAG_TT) &&
+                    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
                         continue;
 
                 /* Select this page size if it's the first that supports

@@ -157,8 +157,8 @@ struct nouveau_drm {
                 struct nvif_object copy;
                 int mtrr;
                 int type_vram;
-                int type_host;
-                int type_ncoh;
+                int type_host[2];
+                int type_ncoh[2];
         } ttm;
 
         /* GEM interface support */
@@ -217,6 +217,13 @@ nouveau_drm(struct drm_device *dev)
         return dev->dev_private;
 }
 
+static inline bool
+nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
+{
+        struct nvif_mmu *mmu = &drm->client.mmu;
+        return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
+}
+
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 bool nouveau_pmops_runtime(void);

@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
         drm_fb_helper_unregister_fbi(&fbcon->helper);
         drm_fb_helper_fini(&fbcon->helper);
 
-        if (nouveau_fb->nvbo) {
+        if (nouveau_fb && nouveau_fb->nvbo) {
                 nouveau_vma_del(&nouveau_fb->vma);
                 nouveau_bo_unmap(nouveau_fb->nvbo);
                 nouveau_bo_unpin(nouveau_fb->nvbo);

@@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
         u8 type;
         int ret;
 
-        if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
-                type = drm->ttm.type_ncoh;
+        if (!nouveau_drm_use_coherent_gpu_mapping(drm))
+                type = drm->ttm.type_ncoh[!!mem->kind];
         else
-                type = drm->ttm.type_host;
+                type = drm->ttm.type_host[0];
 
         if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
                 mem->comp = mem->kind = 0;

@@ -235,6 +235,27 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
         drm->ttm.mem_global_ref.release = NULL;
 }
 
+static int
+nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
+{
+        struct nvif_mmu *mmu = &drm->client.mmu;
+        int typei;
+
+        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
+                                   kind | NVIF_MEM_COHERENT);
+        if (typei < 0)
+                return -ENOSYS;
+
+        drm->ttm.type_host[!!kind] = typei;
+
+        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
+        if (typei < 0)
+                return -ENOSYS;
+
+        drm->ttm.type_ncoh[!!kind] = typei;
+        return 0;
+}
+
 int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
@@ -244,18 +265,16 @@ nouveau_ttm_init(struct nouveau_drm *drm)
         struct drm_device *dev = drm->dev;
         int typei, ret;
 
-        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
-                                   NVIF_MEM_COHERENT);
-        if (typei < 0)
-                return -ENOSYS;
-
-        drm->ttm.type_host = typei;
-
-        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
-        if (typei < 0)
-                return -ENOSYS;
-
-        drm->ttm.type_ncoh = typei;
+        ret = nouveau_ttm_init_host(drm, 0);
+        if (ret)
+                return ret;
+
+        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+            drm->client.device.info.chipset != 0x50) {
+                ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
+                if (ret)
+                        return ret;
+        }
 
         if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
             drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {

@@ -2369,7 +2369,7 @@ nv13b_chipset = {
         .imem = gk20a_instmem_new,
         .ltc = gp100_ltc_new,
         .mc = gp10b_mc_new,
-        .mmu = gf100_mmu_new,
+        .mmu = gp10b_mmu_new,
         .secboot = gp10b_secboot_new,
         .pmu = gm20b_pmu_new,
         .timer = gk20a_timer_new,

@@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
         if (data) {
                 *ver = nvbios_rd08(bios, data + 0x00);
                 switch (*ver) {
+                case 0x20:
                 case 0x21:
                 case 0x30:
                 case 0x40:
@@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
         if (data && idx < *cnt) {
                 u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
                 switch (*ver * !!outp) {
+                case 0x20:
                 case 0x21:
                 case 0x30:
                         *hdr = nvbios_rd08(bios, data + 0x04);
@@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
                 info->type = nvbios_rd16(bios, data + 0x00);
                 info->mask = nvbios_rd16(bios, data + 0x02);
                 switch (*ver) {
+                case 0x20:
+                        info->mask |= 0x00c0; /* match any link */
+                        /* fall-through */
                 case 0x21:
                 case 0x30:
                         info->flags = nvbios_rd08(bios, data + 0x05);
                         info->script[0] = nvbios_rd16(bios, data + 0x06);
                         info->script[1] = nvbios_rd16(bios, data + 0x08);
-                        info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
+                        if (*len >= 0x0c)
+                                info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
                         if (*len >= 0x0f) {
                                 info->script[2] = nvbios_rd16(bios, data + 0x0c);
                                 info->script[3] = nvbios_rd16(bios, data + 0x0e);
@@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
         memset(info, 0x00, sizeof(*info));
         if (data) {
                 switch (*ver) {
+                case 0x20:
                 case 0x21:
                         info->dc = nvbios_rd08(bios, data + 0x02);
                         info->pe = nvbios_rd08(bios, data + 0x03);

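The lnkcmp change in nvbios_dpout_parse() above guards an optional field with a length check, since DP Info Table 2.0 entries can be shorter than offset 0x0c. Below is a minimal userspace sketch of that length-gated read pattern; the rd16() helper, offsets, and dummy entry are illustrative stand-ins, not the real VBIOS layout or the nvbios_rd16() API:

#include <stdint.h>
#include <stdio.h>

/* Little-endian 16-bit read from a byte buffer (stand-in for a VBIOS read). */
static uint16_t rd16(const uint8_t *p, unsigned int off)
{
        return (uint16_t)p[off] | ((uint16_t)p[off + 1] << 8);
}

int main(void)
{
        uint8_t entry[0x0b] = { 0 };    /* a short entry: no bytes at 0x0a..0x0b */
        unsigned int len = sizeof(entry);
        uint16_t lnkcmp = 0;

        if (len >= 0x0c)                /* only read the field if the entry covers it */
                lnkcmp = rd16(entry, 0x0a);

        printf("lnkcmp = 0x%04x\n", lnkcmp);
        return 0;
}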
@@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
                         iobj->base.memory.ptrs = &nv50_instobj_fast;
                 else
                         iobj->base.memory.ptrs = &nv50_instobj_slow;
-                refcount_inc(&iobj->maps);
+                refcount_set(&iobj->maps, 1);
         }
 
         mutex_unlock(&imem->subdev.mutex);

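The instmem change above swaps refcount_inc() for refcount_set(..., 1) because the kernel's refcount_t warns when a counter is incremented from zero, which normally signals a use-after-free; here the map count can legitimately be zero, so the 0 -> 1 transition is done with an explicit set. A rough userspace model of that rule follows; it is a toy, not the kernel refcount_t API:

#include <assert.h>
#include <stdio.h>

/* Toy model of refcount_t semantics: incrementing from 0 is treated as a
 * bug, while an explicit set is the sanctioned way to go from 0 back to 1. */
struct toy_refcount { int val; };

static void toy_refcount_inc(struct toy_refcount *r)
{
        assert(r->val != 0 && "increment on 0; use a set for 0 -> 1");
        r->val++;
}

static void toy_refcount_set(struct toy_refcount *r, int v)
{
        r->val = v;
}

int main(void)
{
        struct toy_refcount maps = { 0 };

        toy_refcount_set(&maps, 1);     /* first map: 0 -> 1, as in the fix */
        toy_refcount_inc(&maps);        /* later maps increment normally */
        printf("maps = %d\n", maps.val);
        return 0;
}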
@@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
                 return ret;
 
         pci->irq = pdev->irq;
+
+        /* Ensure MSI interrupts are armed, for the case where there are
+         * already interrupts pending (for whatever reason) at load time.
+         */
+        if (pci->msi)
+                pci->func->msi_rearm(pci);
+
         return ret;
 }