Merge branch 'linux-4.10' of git://github.com/skeggsb/linux into drm-next
- GP102/GP104 devinit (suspend/resume, optimus) hang fix - GP102/GP104 hardware cursor fix - Fix for a regression on some non-MST monitors that was caused by the MST work - Workaround for certain laptops where ACPI sends display hotkey presses on a modeset, causing gnome-settings-daemon to go into a continuous loop * 'linux-4.10' of git://github.com/skeggsb/linux: drm/nouveau/disp/gp102: rename from gp104 drm/nouveau/ce/gp102: rename from gp104 drm/nouveau/fb/gp102: rename from gp104 drm/nouveau/disp/gp102: fix cursor/overlay immediate channel indices drm/nouveau/disp/nv50-: specify ctrl/user separately when constructing classes drm/nouveau/disp/nv50-: split chid into chid.ctrl and chid.user drm/nouveau: Intercept ACPI_VIDEO_NOTIFY_PROBE drm/nouveau/devinit/gm200: drop pmu reset sequence drm/nouveau/devinit/gm200: replace while loops with PTIMER-based timeout loops drm/nouveau/pmu/gp102: initial implementation drm/nouveau/pmu/gp100: initial implementation drm/nouveau/pmu: execute reset before running devinit drm/nouveau/pmu: move ucode handling into gt215 implementation drm/nouveau/core: initial support for GP102 drm/nouveau/device/pci: fix oops if no mmu subdev present drm/nouveau/kms/nv50: avoid touching DP_MSTM_CTRL if !DP_MST_CAP
This commit is contained in:
commit
08859ede42
|
@ -52,7 +52,7 @@
|
|||
#define GM107_DISP /* cl5070.h */ 0x00009470
|
||||
#define GM200_DISP /* cl5070.h */ 0x00009570
|
||||
#define GP100_DISP /* cl5070.h */ 0x00009770
|
||||
#define GP104_DISP /* cl5070.h */ 0x00009870
|
||||
#define GP102_DISP /* cl5070.h */ 0x00009870
|
||||
|
||||
#define NV31_MPEG 0x00003174
|
||||
#define G82_MPEG 0x00008274
|
||||
|
@ -90,7 +90,7 @@
|
|||
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
|
||||
#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
|
||||
#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
|
||||
#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
|
||||
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
|
||||
|
||||
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
|
||||
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
|
||||
|
|
|
@ -8,5 +8,5 @@ int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
|
|||
int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
|
||||
int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
|
||||
int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
|
||||
int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
|
||||
int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
|
||||
#endif
|
||||
|
|
|
@ -33,5 +33,5 @@ int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
|
|||
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
|
||||
int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
|
||||
int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
|
||||
int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
|
||||
int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
|
||||
#endif
|
||||
|
|
|
@ -95,7 +95,7 @@ int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
|
|||
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
|
||||
int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
|
||||
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
|
||||
int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
|
||||
int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
|
||||
|
||||
#include <subdev/bios.h>
|
||||
#include <subdev/bios/ramcfg.h>
|
||||
|
|
|
@ -35,6 +35,8 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
|
|||
int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
|
||||
int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
|
||||
int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
|
||||
int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
|
||||
int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
|
||||
|
||||
/* interface to MEMX process running on PMU */
|
||||
struct nvkm_memx;
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include <acpi/video.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
|
@ -348,6 +349,55 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
|
|||
} \
|
||||
} while(0)
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
|
||||
/*
|
||||
* Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
|
||||
* to the acpi subsys to move it there from drivers/acpi/acpi_video.c .
|
||||
* This should be dropped once that is merged.
|
||||
*/
|
||||
#ifndef ACPI_VIDEO_NOTIFY_PROBE
|
||||
#define ACPI_VIDEO_NOTIFY_PROBE 0x81
|
||||
#endif
|
||||
|
||||
static void
|
||||
nouveau_display_acpi_work(struct work_struct *work)
|
||||
{
|
||||
struct nouveau_drm *drm = container_of(work, typeof(*drm), acpi_work);
|
||||
|
||||
pm_runtime_get_sync(drm->dev->dev);
|
||||
|
||||
drm_helper_hpd_irq_event(drm->dev);
|
||||
|
||||
pm_runtime_mark_last_busy(drm->dev->dev);
|
||||
pm_runtime_put_sync(drm->dev->dev);
|
||||
}
|
||||
|
||||
static int
|
||||
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
|
||||
void *data)
|
||||
{
|
||||
struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
|
||||
struct acpi_bus_event *info = data;
|
||||
|
||||
if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
|
||||
if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
|
||||
/*
|
||||
* This may be the only indication we receive of a
|
||||
* connector hotplug on a runtime suspended GPU,
|
||||
* schedule acpi_work to check.
|
||||
*/
|
||||
schedule_work(&drm->acpi_work);
|
||||
|
||||
/* acpi-video should not generate keypresses for this */
|
||||
return NOTIFY_BAD;
|
||||
}
|
||||
}
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
#endif
|
||||
|
||||
int
|
||||
nouveau_display_init(struct drm_device *dev)
|
||||
{
|
||||
|
@ -488,7 +538,7 @@ nouveau_display_create(struct drm_device *dev)
|
|||
|
||||
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
|
||||
static const u16 oclass[] = {
|
||||
GP104_DISP,
|
||||
GP102_DISP,
|
||||
GP100_DISP,
|
||||
GM200_DISP,
|
||||
GM107_DISP,
|
||||
|
@ -532,6 +582,12 @@ nouveau_display_create(struct drm_device *dev)
|
|||
}
|
||||
|
||||
nouveau_backlight_init(dev);
|
||||
#ifdef CONFIG_ACPI
|
||||
INIT_WORK(&drm->acpi_work, nouveau_display_acpi_work);
|
||||
drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
|
||||
register_acpi_notifier(&drm->acpi_nb);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
|
||||
vblank_err:
|
||||
|
@ -547,6 +603,9 @@ nouveau_display_destroy(struct drm_device *dev)
|
|||
{
|
||||
struct nouveau_display *disp = nouveau_display(dev);
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
|
||||
#endif
|
||||
nouveau_backlight_exit(dev);
|
||||
nouveau_display_vblank_fini(dev);
|
||||
|
||||
|
|
|
@ -37,6 +37,8 @@
|
|||
* - implemented limited ABI16/NVIF interop
|
||||
*/
|
||||
|
||||
#include <linux/notifier.h>
|
||||
|
||||
#include <nvif/client.h>
|
||||
#include <nvif/device.h>
|
||||
#include <nvif/ioctl.h>
|
||||
|
@ -161,6 +163,10 @@ struct nouveau_drm {
|
|||
struct nvbios vbios;
|
||||
struct nouveau_display *display;
|
||||
struct backlight_device *backlight;
|
||||
#ifdef CONFIG_ACPI
|
||||
struct notifier_block acpi_nb;
|
||||
struct work_struct acpi_work;
|
||||
#endif
|
||||
|
||||
/* power management */
|
||||
struct nouveau_hwmon *hwmon;
|
||||
|
|
|
@ -574,7 +574,7 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
|
|||
.pushbuf = 0xb0007d00,
|
||||
};
|
||||
static const s32 oclass[] = {
|
||||
GP104_DISP_CORE_CHANNEL_DMA,
|
||||
GP102_DISP_CORE_CHANNEL_DMA,
|
||||
GP100_DISP_CORE_CHANNEL_DMA,
|
||||
GM200_DISP_CORE_CHANNEL_DMA,
|
||||
GM107_DISP_CORE_CHANNEL_DMA,
|
||||
|
@ -3343,12 +3343,15 @@ nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
|
|||
if (!mstm)
|
||||
return 0;
|
||||
|
||||
if (dpcd[0] >= 0x12 && allow) {
|
||||
if (dpcd[0] >= 0x12) {
|
||||
ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
state = dpcd[1] & DP_MST_CAP;
|
||||
if (!(dpcd[1] & DP_MST_CAP))
|
||||
dpcd[0] = 0x11;
|
||||
else
|
||||
state = allow;
|
||||
}
|
||||
|
||||
ret = nv50_mstm_enable(mstm, dpcd[0], state);
|
||||
|
|
|
@ -4,4 +4,4 @@ nvkm-y += nvkm/engine/ce/gk104.o
|
|||
nvkm-y += nvkm/engine/ce/gm107.o
|
||||
nvkm-y += nvkm/engine/ce/gm200.o
|
||||
nvkm-y += nvkm/engine/ce/gp100.o
|
||||
nvkm-y += nvkm/engine/ce/gp104.o
|
||||
nvkm-y += nvkm/engine/ce/gp102.o
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
#include <nvif/class.h>
|
||||
|
||||
static const struct nvkm_engine_func
|
||||
gp104_ce = {
|
||||
gp102_ce = {
|
||||
.intr = gp100_ce_intr,
|
||||
.sclass = {
|
||||
{ -1, -1, PASCAL_DMA_COPY_B },
|
||||
|
@ -37,8 +37,8 @@ gp104_ce = {
|
|||
};
|
||||
|
||||
int
|
||||
gp104_ce_new(struct nvkm_device *device, int index,
|
||||
gp102_ce_new(struct nvkm_device *device, int index,
|
||||
struct nvkm_engine **pengine)
|
||||
{
|
||||
return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
|
||||
return nvkm_engine_new_(&gp102_ce, device, index, true, pengine);
|
||||
}
|
|
@ -2167,6 +2167,7 @@ nv130_chipset = {
|
|||
.mmu = gf100_mmu_new,
|
||||
.secboot = gm200_secboot_new,
|
||||
.pci = gp100_pci_new,
|
||||
.pmu = gp100_pmu_new,
|
||||
.timer = gk20a_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.ce[0] = gp100_ce_new,
|
||||
|
@ -2183,13 +2184,13 @@ nv130_chipset = {
|
|||
};
|
||||
|
||||
static const struct nvkm_device_chip
|
||||
nv134_chipset = {
|
||||
.name = "GP104",
|
||||
nv132_chipset = {
|
||||
.name = "GP102",
|
||||
.bar = gf100_bar_new,
|
||||
.bios = nvkm_bios_new,
|
||||
.bus = gf100_bus_new,
|
||||
.devinit = gm200_devinit_new,
|
||||
.fb = gp104_fb_new,
|
||||
.fb = gp102_fb_new,
|
||||
.fuse = gm107_fuse_new,
|
||||
.gpio = gk104_gpio_new,
|
||||
.i2c = gm200_i2c_new,
|
||||
|
@ -2199,13 +2200,43 @@ nv134_chipset = {
|
|||
.mc = gp100_mc_new,
|
||||
.mmu = gf100_mmu_new,
|
||||
.pci = gp100_pci_new,
|
||||
.pmu = gp102_pmu_new,
|
||||
.timer = gk20a_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.ce[0] = gp104_ce_new,
|
||||
.ce[1] = gp104_ce_new,
|
||||
.ce[2] = gp104_ce_new,
|
||||
.ce[3] = gp104_ce_new,
|
||||
.disp = gp104_disp_new,
|
||||
.ce[0] = gp102_ce_new,
|
||||
.ce[1] = gp102_ce_new,
|
||||
.ce[2] = gp102_ce_new,
|
||||
.ce[3] = gp102_ce_new,
|
||||
.disp = gp102_disp_new,
|
||||
.dma = gf119_dma_new,
|
||||
.fifo = gp100_fifo_new,
|
||||
};
|
||||
|
||||
static const struct nvkm_device_chip
|
||||
nv134_chipset = {
|
||||
.name = "GP104",
|
||||
.bar = gf100_bar_new,
|
||||
.bios = nvkm_bios_new,
|
||||
.bus = gf100_bus_new,
|
||||
.devinit = gm200_devinit_new,
|
||||
.fb = gp102_fb_new,
|
||||
.fuse = gm107_fuse_new,
|
||||
.gpio = gk104_gpio_new,
|
||||
.i2c = gm200_i2c_new,
|
||||
.ibus = gm200_ibus_new,
|
||||
.imem = nv50_instmem_new,
|
||||
.ltc = gp100_ltc_new,
|
||||
.mc = gp100_mc_new,
|
||||
.mmu = gf100_mmu_new,
|
||||
.pci = gp100_pci_new,
|
||||
.pmu = gp102_pmu_new,
|
||||
.timer = gk20a_timer_new,
|
||||
.top = gk104_top_new,
|
||||
.ce[0] = gp102_ce_new,
|
||||
.ce[1] = gp102_ce_new,
|
||||
.ce[2] = gp102_ce_new,
|
||||
.ce[3] = gp102_ce_new,
|
||||
.disp = gp102_disp_new,
|
||||
.dma = gf119_dma_new,
|
||||
.fifo = gp100_fifo_new,
|
||||
};
|
||||
|
@ -2644,6 +2675,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
|
|||
case 0x126: device->chip = &nv126_chipset; break;
|
||||
case 0x12b: device->chip = &nv12b_chipset; break;
|
||||
case 0x130: device->chip = &nv130_chipset; break;
|
||||
case 0x132: device->chip = &nv132_chipset; break;
|
||||
case 0x134: device->chip = &nv134_chipset; break;
|
||||
default:
|
||||
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
|
||||
|
|
|
@ -1687,7 +1687,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
|
|||
* This is necessary for platforms where the default DMA mask of 32
|
||||
* does not cover any system memory, i.e., when all RAM is > 4 GB.
|
||||
*/
|
||||
if (subdev_mask & BIT(NVKM_SUBDEV_MMU))
|
||||
if (pdev->device.mmu)
|
||||
dma_set_mask_and_coherent(&pci_dev->dev,
|
||||
DMA_BIT_MASK(pdev->device.mmu->dma_bits));
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ nvkm-y += nvkm/engine/disp/gk110.o
|
|||
nvkm-y += nvkm/engine/disp/gm107.o
|
||||
nvkm-y += nvkm/engine/disp/gm200.o
|
||||
nvkm-y += nvkm/engine/disp/gp100.o
|
||||
nvkm-y += nvkm/engine/disp/gp104.o
|
||||
nvkm-y += nvkm/engine/disp/gp102.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/outp.o
|
||||
nvkm-y += nvkm/engine/disp/outpdp.o
|
||||
|
@ -48,14 +48,14 @@ nvkm-y += nvkm/engine/disp/rootgk110.o
|
|||
nvkm-y += nvkm/engine/disp/rootgm107.o
|
||||
nvkm-y += nvkm/engine/disp/rootgm200.o
|
||||
nvkm-y += nvkm/engine/disp/rootgp100.o
|
||||
nvkm-y += nvkm/engine/disp/rootgp104.o
|
||||
nvkm-y += nvkm/engine/disp/rootgp102.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/channv50.o
|
||||
nvkm-y += nvkm/engine/disp/changf119.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/dmacnv50.o
|
||||
nvkm-y += nvkm/engine/disp/dmacgf119.o
|
||||
nvkm-y += nvkm/engine/disp/dmacgp104.o
|
||||
nvkm-y += nvkm/engine/disp/dmacgp102.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/basenv50.o
|
||||
nvkm-y += nvkm/engine/disp/baseg84.o
|
||||
|
@ -64,7 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
|
|||
nvkm-y += nvkm/engine/disp/basegf119.o
|
||||
nvkm-y += nvkm/engine/disp/basegk104.o
|
||||
nvkm-y += nvkm/engine/disp/basegk110.o
|
||||
nvkm-y += nvkm/engine/disp/basegp104.o
|
||||
nvkm-y += nvkm/engine/disp/basegp102.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/corenv50.o
|
||||
nvkm-y += nvkm/engine/disp/coreg84.o
|
||||
|
@ -77,7 +77,7 @@ nvkm-y += nvkm/engine/disp/coregk110.o
|
|||
nvkm-y += nvkm/engine/disp/coregm107.o
|
||||
nvkm-y += nvkm/engine/disp/coregm200.o
|
||||
nvkm-y += nvkm/engine/disp/coregp100.o
|
||||
nvkm-y += nvkm/engine/disp/coregp104.o
|
||||
nvkm-y += nvkm/engine/disp/coregp102.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/ovlynv50.o
|
||||
nvkm-y += nvkm/engine/disp/ovlyg84.o
|
||||
|
@ -85,7 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
|
|||
nvkm-y += nvkm/engine/disp/ovlygt215.o
|
||||
nvkm-y += nvkm/engine/disp/ovlygf119.o
|
||||
nvkm-y += nvkm/engine/disp/ovlygk104.o
|
||||
nvkm-y += nvkm/engine/disp/ovlygp104.o
|
||||
nvkm-y += nvkm/engine/disp/ovlygp102.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/piocnv50.o
|
||||
nvkm-y += nvkm/engine/disp/piocgf119.o
|
||||
|
@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o
|
|||
nvkm-y += nvkm/engine/disp/cursgt215.o
|
||||
nvkm-y += nvkm/engine/disp/cursgf119.o
|
||||
nvkm-y += nvkm/engine/disp/cursgk104.o
|
||||
nvkm-y += nvkm/engine/disp/cursgp102.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/oimmnv50.o
|
||||
nvkm-y += nvkm/engine/disp/oimmg84.o
|
||||
nvkm-y += nvkm/engine/disp/oimmgt215.o
|
||||
nvkm-y += nvkm/engine/disp/oimmgf119.o
|
||||
nvkm-y += nvkm/engine/disp/oimmgk104.o
|
||||
nvkm-y += nvkm/engine/disp/oimmgp102.o
|
||||
|
|
|
@ -27,12 +27,12 @@
|
|||
#include <nvif/class.h>
|
||||
|
||||
const struct nv50_disp_dmac_oclass
|
||||
gp104_disp_base_oclass = {
|
||||
gp102_disp_base_oclass = {
|
||||
.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
|
||||
.base.minver = 0,
|
||||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_base_new,
|
||||
.func = &gp104_disp_dmac_func,
|
||||
.func = &gp102_disp_dmac_func,
|
||||
.mthd = &gf119_disp_base_chan_mthd,
|
||||
.chid = 1,
|
||||
};
|
|
@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
|
|||
|
||||
if (mthd->addr) {
|
||||
snprintf(cname_, sizeof(cname_), "%s %d",
|
||||
mthd->name, chan->chid);
|
||||
mthd->name, chan->chid.user);
|
||||
cname = cname_;
|
||||
}
|
||||
|
||||
|
@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
|
|||
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
|
||||
notify->size = sizeof(struct nvif_notify_uevent_rep);
|
||||
notify->types = 1;
|
||||
notify->index = chan->chid;
|
||||
notify->index = chan->chid.user;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -159,7 +159,7 @@ nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
|
|||
struct nv50_disp_chan *chan = nv50_disp_chan(object);
|
||||
struct nv50_disp *disp = chan->root->disp;
|
||||
struct nvkm_device *device = disp->base.engine.subdev.device;
|
||||
*data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
|
||||
*data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -169,7 +169,7 @@ nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
|
|||
struct nv50_disp_chan *chan = nv50_disp_chan(object);
|
||||
struct nv50_disp *disp = chan->root->disp;
|
||||
struct nvkm_device *device = disp->base.engine.subdev.device;
|
||||
nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
|
||||
nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -196,7 +196,7 @@ nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
|
|||
struct nv50_disp *disp = chan->root->disp;
|
||||
struct nvkm_device *device = disp->base.engine.subdev.device;
|
||||
*addr = device->func->resource_addr(device, 0) +
|
||||
0x640000 + (chan->chid * 0x1000);
|
||||
0x640000 + (chan->chid.user * 0x1000);
|
||||
*size = 0x001000;
|
||||
return 0;
|
||||
}
|
||||
|
@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
|
|||
{
|
||||
struct nv50_disp_chan *chan = nv50_disp_chan(object);
|
||||
struct nv50_disp *disp = chan->root->disp;
|
||||
if (chan->chid >= 0)
|
||||
disp->chan[chan->chid] = NULL;
|
||||
if (chan->chid.user >= 0)
|
||||
disp->chan[chan->chid.user] = NULL;
|
||||
return chan->func->dtor ? chan->func->dtor(chan) : chan;
|
||||
}
|
||||
|
||||
|
@ -263,7 +263,7 @@ nv50_disp_chan = {
|
|||
int
|
||||
nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
|
||||
const struct nv50_disp_chan_mthd *mthd,
|
||||
struct nv50_disp_root *root, int chid, int head,
|
||||
struct nv50_disp_root *root, int ctrl, int user, int head,
|
||||
const struct nvkm_oclass *oclass,
|
||||
struct nv50_disp_chan *chan)
|
||||
{
|
||||
|
@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
|
|||
chan->func = func;
|
||||
chan->mthd = mthd;
|
||||
chan->root = root;
|
||||
chan->chid = chid;
|
||||
chan->chid.ctrl = ctrl;
|
||||
chan->chid.user = user;
|
||||
chan->head = head;
|
||||
|
||||
if (disp->chan[chan->chid]) {
|
||||
chan->chid = -1;
|
||||
if (disp->chan[chan->chid.user]) {
|
||||
chan->chid.user = -1;
|
||||
return -EBUSY;
|
||||
}
|
||||
disp->chan[chan->chid] = chan;
|
||||
disp->chan[chan->chid.user] = chan;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
|
||||
const struct nv50_disp_chan_mthd *mthd,
|
||||
struct nv50_disp_root *root, int chid, int head,
|
||||
struct nv50_disp_root *root, int ctrl, int user, int head,
|
||||
const struct nvkm_oclass *oclass,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
|
@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
|
|||
return -ENOMEM;
|
||||
*pobject = &chan->object;
|
||||
|
||||
return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
|
||||
return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
|
||||
head, oclass, chan);
|
||||
}
|
||||
|
|
|
@ -7,7 +7,11 @@ struct nv50_disp_chan {
|
|||
const struct nv50_disp_chan_func *func;
|
||||
const struct nv50_disp_chan_mthd *mthd;
|
||||
struct nv50_disp_root *root;
|
||||
int chid;
|
||||
|
||||
struct {
|
||||
int ctrl;
|
||||
int user;
|
||||
} chid;
|
||||
int head;
|
||||
|
||||
struct nvkm_object object;
|
||||
|
@ -25,11 +29,11 @@ struct nv50_disp_chan_func {
|
|||
|
||||
int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
|
||||
const struct nv50_disp_chan_mthd *,
|
||||
struct nv50_disp_root *, int chid, int head,
|
||||
struct nv50_disp_root *, int ctrl, int user, int head,
|
||||
const struct nvkm_oclass *, struct nv50_disp_chan *);
|
||||
int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
|
||||
const struct nv50_disp_chan_mthd *,
|
||||
struct nv50_disp_root *, int chid, int head,
|
||||
struct nv50_disp_root *, int ctrl, int user, int head,
|
||||
const struct nvkm_oclass *, struct nvkm_object **);
|
||||
|
||||
extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
|
||||
|
@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
|
|||
struct nv50_disp_pioc_oclass {
|
||||
int (*ctor)(const struct nv50_disp_chan_func *,
|
||||
const struct nv50_disp_chan_mthd *,
|
||||
struct nv50_disp_root *, int chid,
|
||||
struct nv50_disp_root *, int ctrl, int user,
|
||||
const struct nvkm_oclass *, void *data, u32 size,
|
||||
struct nvkm_object **);
|
||||
struct nvkm_sclass base;
|
||||
const struct nv50_disp_chan_func *func;
|
||||
const struct nv50_disp_chan_mthd *mthd;
|
||||
int chid;
|
||||
struct {
|
||||
int ctrl;
|
||||
int user;
|
||||
} chid;
|
||||
};
|
||||
|
||||
extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
|
||||
|
@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
|
|||
extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
|
||||
extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
|
||||
|
||||
extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
|
||||
extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
|
||||
|
||||
int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
|
||||
const struct nv50_disp_chan_mthd *,
|
||||
struct nv50_disp_root *, int chid,
|
||||
struct nv50_disp_root *, int ctrl, int user,
|
||||
const struct nvkm_oclass *, void *data, u32 size,
|
||||
struct nvkm_object **);
|
||||
int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
|
||||
const struct nv50_disp_chan_mthd *,
|
||||
struct nv50_disp_root *, int chid,
|
||||
struct nv50_disp_root *, int ctrl, int user,
|
||||
const struct nvkm_oclass *, void *data, u32 size,
|
||||
struct nvkm_object **);
|
||||
#endif
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
#include <nvif/class.h>
|
||||
|
||||
static int
|
||||
gp104_disp_core_init(struct nv50_disp_dmac *chan)
|
||||
gp102_disp_core_init(struct nv50_disp_dmac *chan)
|
||||
{
|
||||
struct nv50_disp *disp = chan->base.root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
|
@ -60,19 +60,19 @@ gp104_disp_core_init(struct nv50_disp_dmac *chan)
|
|||
}
|
||||
|
||||
static const struct nv50_disp_dmac_func
|
||||
gp104_disp_core_func = {
|
||||
.init = gp104_disp_core_init,
|
||||
gp102_disp_core_func = {
|
||||
.init = gp102_disp_core_init,
|
||||
.fini = gf119_disp_core_fini,
|
||||
.bind = gf119_disp_dmac_bind,
|
||||
};
|
||||
|
||||
const struct nv50_disp_dmac_oclass
|
||||
gp104_disp_core_oclass = {
|
||||
.base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
|
||||
gp102_disp_core_oclass = {
|
||||
.base.oclass = GP102_DISP_CORE_CHANNEL_DMA,
|
||||
.base.minver = 0,
|
||||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_core_new,
|
||||
.func = &gp104_disp_core_func,
|
||||
.func = &gp102_disp_core_func,
|
||||
.mthd = &gk104_disp_core_chan_mthd,
|
||||
.chid = 0,
|
||||
};
|
|
@ -33,5 +33,5 @@ g84_disp_curs_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_curs_new,
|
||||
.func = &nv50_disp_pioc_func,
|
||||
.chid = 7,
|
||||
.chid = { 7, 7 },
|
||||
};
|
||||
|
|
|
@ -33,5 +33,5 @@ gf119_disp_curs_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_curs_new,
|
||||
.func = &gf119_disp_pioc_func,
|
||||
.chid = 13,
|
||||
.chid = { 13, 13 },
|
||||
};
|
||||
|
|
|
@ -33,5 +33,5 @@ gk104_disp_curs_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_curs_new,
|
||||
.func = &gf119_disp_pioc_func,
|
||||
.chid = 13,
|
||||
.chid = { 13, 13 },
|
||||
};
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs <bskeggs@redhat.com>
|
||||
*/
|
||||
#include "channv50.h"
|
||||
#include "rootnv50.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
const struct nv50_disp_pioc_oclass
|
||||
gp102_disp_curs_oclass = {
|
||||
.base.oclass = GK104_DISP_CURSOR,
|
||||
.base.minver = 0,
|
||||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_curs_new,
|
||||
.func = &gf119_disp_pioc_func,
|
||||
.chid = { 13, 17 },
|
||||
};
|
|
@ -33,5 +33,5 @@ gt215_disp_curs_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_curs_new,
|
||||
.func = &nv50_disp_pioc_func,
|
||||
.chid = 7,
|
||||
.chid = { 7, 7 },
|
||||
};
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
int
|
||||
nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
|
||||
const struct nv50_disp_chan_mthd *mthd,
|
||||
struct nv50_disp_root *root, int chid,
|
||||
struct nv50_disp_root *root, int ctrl, int user,
|
||||
const struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
|
@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
|
|||
} else
|
||||
return ret;
|
||||
|
||||
return nv50_disp_chan_new_(func, mthd, root, chid + head,
|
||||
return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
|
||||
head, oclass, pobject);
|
||||
}
|
||||
|
||||
|
@ -65,5 +65,5 @@ nv50_disp_curs_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_curs_new,
|
||||
.func = &nv50_disp_pioc_func,
|
||||
.chid = 7,
|
||||
.chid = { 7, 7 },
|
||||
};
|
||||
|
|
|
@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
|
|||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
return nvkm_ramht_insert(chan->base.root->ramht, object,
|
||||
chan->base.chid, -9, handle,
|
||||
chan->base.chid << 27 | 0x00000001);
|
||||
chan->base.chid.user, -9, handle,
|
||||
chan->base.chid.user << 27 | 0x00000001);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
|
|||
struct nv50_disp *disp = chan->base.root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->base.chid;
|
||||
int ctrl = chan->base.chid.ctrl;
|
||||
int user = chan->base.chid.user;
|
||||
|
||||
/* deactivate channel */
|
||||
nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
|
||||
nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
|
||||
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
|
||||
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
|
||||
if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d fini: %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610490 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d fini: %08x\n", user,
|
||||
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
|
||||
}
|
||||
|
||||
/* disable error reporting and completion notification */
|
||||
nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
|
||||
nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
|
|||
struct nv50_disp *disp = chan->base.root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->base.chid;
|
||||
int ctrl = chan->base.chid.ctrl;
|
||||
int user = chan->base.chid.user;
|
||||
|
||||
/* enable error reporting */
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
|
||||
|
||||
/* initialise channel for dma command submission */
|
||||
nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
|
||||
nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
|
||||
nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
|
||||
nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
|
||||
nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
|
||||
nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
|
||||
nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
|
||||
nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
|
||||
nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
|
||||
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
|
||||
nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
|
||||
nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
|
||||
|
||||
/* wait for it to go inactive */
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
|
||||
if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d init: %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610490 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d init: %08x\n", user,
|
||||
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
|
|
@ -27,31 +27,32 @@
|
|||
#include <subdev/timer.h>
|
||||
|
||||
static int
|
||||
gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
|
||||
gp102_disp_dmac_init(struct nv50_disp_dmac *chan)
|
||||
{
|
||||
struct nv50_disp *disp = chan->base.root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->base.chid;
|
||||
int ctrl = chan->base.chid.ctrl;
|
||||
int user = chan->base.chid.user;
|
||||
|
||||
/* enable error reporting */
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
|
||||
|
||||
/* initialise channel for dma command submission */
|
||||
nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
|
||||
nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
|
||||
nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
|
||||
nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
|
||||
nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
|
||||
nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
|
||||
nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
|
||||
nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
|
||||
nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
|
||||
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
|
||||
nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
|
||||
nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
|
||||
|
||||
/* wait for it to go inactive */
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
|
||||
if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d init: %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610490 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d init: %08x\n", user,
|
||||
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
@ -59,8 +60,8 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
|
|||
}
|
||||
|
||||
const struct nv50_disp_dmac_func
|
||||
gp104_disp_dmac_func = {
|
||||
.init = gp104_disp_dmac_init,
|
||||
gp102_disp_dmac_func = {
|
||||
.init = gp102_disp_dmac_init,
|
||||
.fini = gf119_disp_dmac_fini,
|
||||
.bind = gf119_disp_dmac_bind,
|
||||
};
|
|
@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
|
|||
chan->func = func;
|
||||
|
||||
ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
|
||||
chid, head, oclass, &chan->base);
|
||||
chid, chid, head, oclass, &chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
|
|||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
return nvkm_ramht_insert(chan->base.root->ramht, object,
|
||||
chan->base.chid, -10, handle,
|
||||
chan->base.chid << 28 |
|
||||
chan->base.chid);
|
||||
chan->base.chid.user, -10, handle,
|
||||
chan->base.chid.user << 28 |
|
||||
chan->base.chid.user);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
|
|||
struct nv50_disp *disp = chan->base.root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->base.chid;
|
||||
int ctrl = chan->base.chid.ctrl;
|
||||
int user = chan->base.chid.user;
|
||||
|
||||
/* deactivate channel */
|
||||
nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
|
||||
nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
|
||||
nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
|
||||
nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
|
||||
if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610200 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
|
||||
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
|
||||
}
|
||||
|
||||
/* disable error reporting and completion notifications */
|
||||
nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
|
||||
nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
|
|||
struct nv50_disp *disp = chan->base.root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->base.chid;
|
||||
int ctrl = chan->base.chid.ctrl;
|
||||
int user = chan->base.chid.user;
|
||||
|
||||
/* enable error reporting */
|
||||
nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
|
||||
nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
|
||||
|
||||
/* initialise channel for dma command submission */
|
||||
nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
|
||||
nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
|
||||
nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
|
||||
nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
|
||||
nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
|
||||
nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
|
||||
nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
|
||||
nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
|
||||
nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
|
||||
nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
|
||||
nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
|
||||
nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
|
||||
|
||||
/* wait for it to go inactive */
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
|
||||
if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610200 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
|
||||
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
|
|||
extern const struct nv50_disp_dmac_func gf119_disp_core_func;
|
||||
void gf119_disp_core_fini(struct nv50_disp_dmac *);
|
||||
|
||||
extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
|
||||
extern const struct nv50_disp_dmac_func gp102_disp_dmac_func;
|
||||
|
||||
struct nv50_disp_dmac_oclass {
|
||||
int (*ctor)(const struct nv50_disp_dmac_func *,
|
||||
|
@ -95,7 +95,7 @@ extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
|
|||
|
||||
extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
|
||||
|
||||
extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
|
||||
extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
|
||||
extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
|
||||
extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass;
|
||||
extern const struct nv50_disp_dmac_oclass gp102_disp_base_oclass;
|
||||
extern const struct nv50_disp_dmac_oclass gp102_disp_ovly_oclass;
|
||||
#endif
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
#include "rootnv50.h"
|
||||
|
||||
static void
|
||||
gp104_disp_intr_error(struct nv50_disp *disp, int chid)
|
||||
gp102_disp_intr_error(struct nv50_disp *disp, int chid)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
|
@ -51,12 +51,12 @@ gp104_disp_intr_error(struct nv50_disp *disp, int chid)
|
|||
}
|
||||
|
||||
static const struct nv50_disp_func
|
||||
gp104_disp = {
|
||||
gp102_disp = {
|
||||
.intr = gf119_disp_intr,
|
||||
.intr_error = gp104_disp_intr_error,
|
||||
.intr_error = gp102_disp_intr_error,
|
||||
.uevent = &gf119_disp_chan_uevent,
|
||||
.super = gf119_disp_intr_supervisor,
|
||||
.root = &gp104_disp_root_oclass,
|
||||
.root = &gp102_disp_root_oclass,
|
||||
.head.vblank_init = gf119_disp_vblank_init,
|
||||
.head.vblank_fini = gf119_disp_vblank_fini,
|
||||
.head.scanoutpos = gf119_disp_root_scanoutpos,
|
||||
|
@ -75,7 +75,7 @@ gp104_disp = {
|
|||
};
|
||||
|
||||
int
|
||||
gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
|
||||
gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
|
||||
{
|
||||
return gf119_disp_new_(&gp104_disp, device, index, pdisp);
|
||||
return gf119_disp_new_(&gp102_disp, device, index, pdisp);
|
||||
}
|
|
@ -33,5 +33,5 @@ g84_disp_oimm_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_oimm_new,
|
||||
.func = &nv50_disp_pioc_func,
|
||||
.chid = 5,
|
||||
.chid = { 5, 5 },
|
||||
};
|
||||
|
|
|
@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_oimm_new,
|
||||
.func = &gf119_disp_pioc_func,
|
||||
.chid = 9,
|
||||
.chid = { 9, 9 },
|
||||
};
|
||||
|
|
|
@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_oimm_new,
|
||||
.func = &gf119_disp_pioc_func,
|
||||
.chid = 9,
|
||||
.chid = { 9, 9 },
|
||||
};
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
/*
|
||||
* Copyright 2016 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs <bskeggs@redhat.com>
|
||||
*/
|
||||
#include "channv50.h"
|
||||
#include "rootnv50.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
const struct nv50_disp_pioc_oclass
|
||||
gp102_disp_oimm_oclass = {
|
||||
.base.oclass = GK104_DISP_OVERLAY,
|
||||
.base.minver = 0,
|
||||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_oimm_new,
|
||||
.func = &gf119_disp_pioc_func,
|
||||
.chid = { 9, 13 },
|
||||
};
|
|
@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_oimm_new,
|
||||
.func = &nv50_disp_pioc_func,
|
||||
.chid = 5,
|
||||
.chid = { 5, 5 },
|
||||
};
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
int
|
||||
nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
|
||||
const struct nv50_disp_chan_mthd *mthd,
|
||||
struct nv50_disp_root *root, int chid,
|
||||
struct nv50_disp_root *root, int ctrl, int user,
|
||||
const struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
|
@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
|
|||
} else
|
||||
return ret;
|
||||
|
||||
return nv50_disp_chan_new_(func, mthd, root, chid + head,
|
||||
return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
|
||||
head, oclass, pobject);
|
||||
}
|
||||
|
||||
|
@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = {
|
|||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_oimm_new,
|
||||
.func = &nv50_disp_pioc_func,
|
||||
.chid = 5,
|
||||
.chid = { 5, 5 },
|
||||
};
|
||||
|
|
|
@ -27,12 +27,12 @@
|
|||
#include <nvif/class.h>
|
||||
|
||||
const struct nv50_disp_dmac_oclass
|
||||
gp104_disp_ovly_oclass = {
|
||||
gp102_disp_ovly_oclass = {
|
||||
.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
|
||||
.base.minver = 0,
|
||||
.base.maxver = 0,
|
||||
.ctor = nv50_disp_ovly_new,
|
||||
.func = &gp104_disp_dmac_func,
|
||||
.func = &gp102_disp_dmac_func,
|
||||
.mthd = &gk104_disp_ovly_chan_mthd,
|
||||
.chid = 5,
|
||||
};
|
|
@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
|
|||
struct nv50_disp *disp = chan->root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->chid;
|
||||
int ctrl = chan->chid.ctrl;
|
||||
int user = chan->chid.user;
|
||||
|
||||
nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
|
||||
nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
|
||||
if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d fini: %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610490 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d fini: %08x\n", user,
|
||||
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
|
||||
}
|
||||
|
||||
/* disable error reporting and completion notification */
|
||||
nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
|
||||
nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
|
|||
struct nv50_disp *disp = chan->root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->chid;
|
||||
int ctrl = chan->chid.ctrl;
|
||||
int user = chan->chid.user;
|
||||
|
||||
/* enable error reporting */
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
|
||||
nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
|
||||
|
||||
/* activate channel */
|
||||
nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
|
||||
nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
|
||||
if (nvkm_msec(device, 2000,
|
||||
u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
|
||||
u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
|
||||
if ((tmp & 0x00030000) == 0x00010000)
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d init: %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610490 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d init: %08x\n", user,
|
||||
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
|
|
@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
|
|||
struct nv50_disp *disp = chan->root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->chid;
|
||||
int ctrl = chan->chid.ctrl;
|
||||
int user = chan->chid.user;
|
||||
|
||||
nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
|
||||
nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
|
||||
if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610200 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d timeout: %08x\n", user,
|
||||
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan)
|
|||
struct nv50_disp *disp = chan->root->disp;
|
||||
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int chid = chan->chid;
|
||||
int ctrl = chan->chid.ctrl;
|
||||
int user = chan->chid.user;
|
||||
|
||||
nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
|
||||
nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
|
||||
if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610200 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
|
||||
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
|
||||
nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
|
||||
if (nvkm_msec(device, 2000,
|
||||
u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
|
||||
u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
|
||||
if ((tmp & 0x00030000) == 0x00010000)
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
|
||||
nvkm_rd32(device, 0x610200 + (chid * 0x10)));
|
||||
nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
|
||||
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
|
|
@ -27,32 +27,32 @@
|
|||
#include <nvif/class.h>
|
||||
|
||||
static const struct nv50_disp_root_func
|
||||
gp104_disp_root = {
|
||||
gp102_disp_root = {
|
||||
.init = gf119_disp_root_init,
|
||||
.fini = gf119_disp_root_fini,
|
||||
.dmac = {
|
||||
&gp104_disp_core_oclass,
|
||||
&gp104_disp_base_oclass,
|
||||
&gp104_disp_ovly_oclass,
|
||||
&gp102_disp_core_oclass,
|
||||
&gp102_disp_base_oclass,
|
||||
&gp102_disp_ovly_oclass,
|
||||
},
|
||||
.pioc = {
|
||||
&gk104_disp_oimm_oclass,
|
||||
&gk104_disp_curs_oclass,
|
||||
&gp102_disp_oimm_oclass,
|
||||
&gp102_disp_curs_oclass,
|
||||
},
|
||||
};
|
||||
|
||||
static int
|
||||
gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
|
||||
gp102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
|
||||
void *data, u32 size, struct nvkm_object **pobject)
|
||||
{
|
||||
return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
|
||||
return nv50_disp_root_new_(&gp102_disp_root, disp, oclass,
|
||||
data, size, pobject);
|
||||
}
|
||||
|
||||
const struct nvkm_disp_oclass
|
||||
gp104_disp_root_oclass = {
|
||||
.base.oclass = GP104_DISP,
|
||||
gp102_disp_root_oclass = {
|
||||
.base.oclass = GP102_DISP,
|
||||
.base.minver = -1,
|
||||
.base.maxver = -1,
|
||||
.ctor = gp104_disp_root_new,
|
||||
.ctor = gp102_disp_root_new,
|
||||
};
|
|
@ -250,8 +250,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
|
|||
{
|
||||
const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
|
||||
struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
|
||||
return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
|
||||
oclass, data, size, pobject);
|
||||
return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
|
||||
sclass->chid.user, oclass, data, size, pobject);
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
|
@ -41,5 +41,5 @@ extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
|
|||
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
|
||||
extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
|
||||
extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
|
||||
extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
|
||||
extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
|
||||
#endif
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <subdev/bios.h>
|
||||
#include <subdev/bios/bit.h>
|
||||
#include <subdev/bios/pmu.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
static void
|
||||
pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
|
||||
|
@ -123,15 +124,6 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* reset PMU and load init table parser ucode */
|
||||
if (post) {
|
||||
nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
|
||||
nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
|
||||
nvkm_rd32(device, 0x000200);
|
||||
while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
|
||||
}
|
||||
}
|
||||
|
||||
ret = pmu_load(init, 0x04, post, &exec, &args);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -156,8 +148,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
|
|||
if (post) {
|
||||
nvkm_wr32(device, 0x10a040, 0x00005000);
|
||||
pmu_exec(init, exec);
|
||||
while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
|
||||
}
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, 0x10a040) & 0x00002000)
|
||||
break;
|
||||
) < 0)
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/* load and execute some other ucode image (bios therm?) */
|
||||
|
|
|
@ -26,7 +26,7 @@ nvkm-y += nvkm/subdev/fb/gm107.o
|
|||
nvkm-y += nvkm/subdev/fb/gm200.o
|
||||
nvkm-y += nvkm/subdev/fb/gm20b.o
|
||||
nvkm-y += nvkm/subdev/fb/gp100.o
|
||||
nvkm-y += nvkm/subdev/fb/gp104.o
|
||||
nvkm-y += nvkm/subdev/fb/gp102.o
|
||||
|
||||
nvkm-y += nvkm/subdev/fb/ram.o
|
||||
nvkm-y += nvkm/subdev/fb/ramnv04.o
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
#include <core/memory.h>
|
||||
|
||||
static const struct nvkm_fb_func
|
||||
gp104_fb = {
|
||||
gp102_fb = {
|
||||
.dtor = gf100_fb_dtor,
|
||||
.oneinit = gf100_fb_oneinit,
|
||||
.init = gp100_fb_init,
|
||||
|
@ -37,7 +37,7 @@ gp104_fb = {
|
|||
};
|
||||
|
||||
int
|
||||
gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
|
||||
gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
|
||||
{
|
||||
return gf100_fb_new_(&gp104_fb, device, index, pfb);
|
||||
return gf100_fb_new_(&gp102_fb, device, index, pfb);
|
||||
}
|
|
@ -8,3 +8,5 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
|
|||
nvkm-y += nvkm/subdev/pmu/gk208.o
|
||||
nvkm-y += nvkm/subdev/pmu/gk20a.o
|
||||
nvkm-y += nvkm/subdev/pmu/gm107.o
|
||||
nvkm-y += nvkm/subdev/pmu/gp100.o
|
||||
nvkm-y += nvkm/subdev/pmu/gp102.o
|
||||
|
|
|
@@ -32,225 +32,85 @@ nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
		pmu->func->pgob(pmu, enable);
}

static void
nvkm_pmu_recv(struct work_struct *work)
{
	struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
	return pmu->func->recv(pmu);
}

int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	      u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	mutex_lock(&subdev->mutex);
	/* wait for a free slot in the fifo */
	addr = nvkm_rd32(device, 0x10a4a0);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0) {
		mutex_unlock(&subdev->mutex);
		return -EBUSY;
	}

	/* we currently only support a single process at a time waiting
	 * on a synchronous reply, take the PMU mutex and tell the
	 * receive handler what we're waiting for
	 */
	if (reply) {
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);

	/* write the packet */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wait for reply, if requested */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
	}

	mutex_unlock(&subdev->mutex);
	return 0;
}

static void
nvkm_pmu_recv(struct work_struct *work)
{
	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT */
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

	/* right now there's no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >> 0),
		  (char)((process & 0x0000ff00) >> 8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
	if (!pmu || !pmu->func->send)
		return -ENODEV;
	return pmu->func->send(pmu, reply, process, message, data0, data1);
}

static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
	if (!pmu->func->intr)
		return;
	pmu->func->intr(pmu);
}

static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;

	nvkm_wr32(device, 0x10a014, 0x00000060);
	if (pmu->func->fini)
		pmu->func->fini(pmu);

	flush_work(&pmu->recv.work);
	return 0;
}

static int
nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;

	if (!(nvkm_rd32(device, 0x000200) & 0x00002000))
		return 0;

	/* Inhibit interrupts, and wait for idle. */
	nvkm_wr32(device, 0x10a014, 0x0000ffff);
	nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x10a04c))
			break;
	);

	/* Reset. */
	pmu->func->reset(pmu);

	/* Wait for IMEM/DMEM scrubbing to be complete. */
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			break;
	);

	return 0;
}

static int
nvkm_pmu_preinit(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	return nvkm_pmu_reset(pmu);
}

static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
	int i;

	/* prevent previous ucode from running, wait for idle, reset */
	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
	nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x10a04c))
			break;
	);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
	nvkm_rd32(device, 0x000200);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			break;
	);

	/* upload data segment */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < pmu->func->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

	/* upload code segment */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < pmu->func->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
	}

	/* start it running */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

	/* wait for valid host->pmu ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

	/* wait for valid pmu->host ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
	int ret = nvkm_pmu_reset(pmu);
	if (ret == 0 && pmu->func->init)
		ret = pmu->func->init(pmu);
	return ret;
}

static void *

@@ -262,6 +122,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_pmu = {
	.dtor = nvkm_pmu_dtor,
	.preinit = nvkm_pmu_preinit,
	.init = nvkm_pmu_init,
	.fini = nvkm_pmu_fini,
	.intr = nvkm_pmu_intr,

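The net effect of the base.c changes above: nvkm_pmu_send(), nvkm_pmu_recv() and nvkm_pmu_intr() become thin wrappers that dispatch through pmu->func, and the hard-coded reset/ucode-upload sequence moves out of nvkm_pmu_init(). Reset now also runs at preinit (via the new .preinit hook), so devinit executes with the PMU quiesced. Because the hunk interleaves removed and added lines, here is a condensed restatement of the resulting init path, for orientation only (example_pmu_bringup is a hypothetical name; the real code is nvkm_pmu_preinit()/nvkm_pmu_init() in base.c as shown above):

	/* Condensed sketch of the post-patch flow, not part of the patch. */
	static int
	example_pmu_bringup(struct nvkm_pmu *pmu)
	{
		int ret = nvkm_pmu_reset(pmu);		/* inhibit intrs, wait idle, func->reset, wait for scrub */
		if (ret == 0 && pmu->func->init)
			ret = pmu->func->init(pmu);	/* e.g. gt215_pmu_init() uploads ucode and starts it */
		return ret;
	}
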
@@ -30,6 +30,12 @@ gf100_pmu = {
	.code.size = sizeof(gf100_pmu_code),
	.data.data = gf100_pmu_data,
	.data.size = sizeof(gf100_pmu_data),
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
};

int

@@ -30,6 +30,12 @@ gf119_pmu = {
	.code.size = sizeof(gf119_pmu_code),
	.data.data = gf119_pmu_data,
	.data.size = sizeof(gf119_pmu_data),
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
};

int

@@ -109,6 +109,12 @@ gk104_pmu = {
	.code.size = sizeof(gk104_pmu_code),
	.data.data = gk104_pmu_data,
	.data.size = sizeof(gk104_pmu_data),
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
	.pgob = gk104_pmu_pgob,
};

@@ -88,6 +88,12 @@ gk110_pmu = {
	.code.size = sizeof(gk110_pmu_code),
	.data.data = gk110_pmu_data,
	.data.size = sizeof(gk110_pmu_data),
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
	.pgob = gk110_pmu_pgob,
};

@@ -30,6 +30,12 @@ gk208_pmu = {
	.code.size = sizeof(gk208_pmu_code),
	.data.data = gk208_pmu_data,
	.data.size = sizeof(gk208_pmu_data),
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
	.pgob = gk110_pmu_pgob,
};

@@ -32,6 +32,12 @@ gm107_pmu = {
	.code.size = sizeof(gm107_pmu_code),
	.data.data = gm107_pmu_data,
	.data.size = sizeof(gm107_pmu_data),
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
};

int

@@ -0,0 +1,35 @@
/*
 * Copyright 2016 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "priv.h"

static const struct nvkm_pmu_func
gp100_pmu = {
	.reset = gt215_pmu_reset,
};

int
gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu);
}

@@ -0,0 +1,43 @@
/*
 * Copyright 2016 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "priv.h"

static void
gp102_pmu_reset(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;
	nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000001);
	nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000000);
}

static const struct nvkm_pmu_func
gp102_pmu = {
	.reset = gp102_pmu_reset,
};

int
gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu);
}

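The gp102 implementation differs from gp100 only in how the falcon is reset: GP102/GP104 toggle a dedicated reset register (0x10a3c0) here, rather than the PMC enable bit (0x000200 bit 13) that gt215_pmu_reset() flips. Neither provides ucode, so base.c's init path stops after the reset step on these chips. Presumably the new constructors are then referenced from the per-chipset device tables; the entry below is an illustrative assumption and not part of this diff:

	/* Illustrative only: roughly how a chipset table entry in
	 * nvkm/engine/device/base.c would pick up the new constructor.
	 */
	static const struct nvkm_device_chip
	example_gp102_chipset = {
		.name = "GP102",
		.pmu = gp102_pmu_new,
		/* ...remaining subdev/engine constructors elided... */
	};
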
@@ -24,12 +24,229 @@
#include "priv.h"
#include "fuc/gt215.fuc3.h"

#include <subdev/timer.h>

int
gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	       u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	mutex_lock(&subdev->mutex);
	/* wait for a free slot in the fifo */
	addr = nvkm_rd32(device, 0x10a4a0);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0) {
		mutex_unlock(&subdev->mutex);
		return -EBUSY;
	}

	/* we currently only support a single process at a time waiting
	 * on a synchronous reply, take the PMU mutex and tell the
	 * receive handler what we're waiting for
	 */
	if (reply) {
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);

	/* write the packet */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wait for reply, if requested */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
	}

	mutex_unlock(&subdev->mutex);
	return 0;
}

void
gt215_pmu_recv(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT */
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

	/* right now there's no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >> 0),
		  (char)((process & 0x0000ff00) >> 8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
}

void
gt215_pmu_intr(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}

void
gt215_pmu_fini(struct nvkm_pmu *pmu)
{
	nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
}

void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;
	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
	nvkm_rd32(device, 0x000200);
}

int
gt215_pmu_init(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;
	int i;

	/* upload data segment */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < pmu->func->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

	/* upload code segment */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < pmu->func->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
	}

	/* start it running */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

	/* wait for valid host->pmu ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

	/* wait for valid pmu->host ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}

static const struct nvkm_pmu_func
gt215_pmu = {
	.code.data = gt215_pmu_code,
	.code.size = sizeof(gt215_pmu_code),
	.data.data = gt215_pmu_data,
	.data.size = sizeof(gt215_pmu_data),
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
};

int

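gt215.c now owns the host<->PMU message FIFO and the ucode upload verbatim (the bodies match what was removed from base.c), exported as the gt215_pmu_* helpers. Callers keep using nvkm_pmu_send(), which dispatches to .send; a synchronous request blocks in gt215_pmu_send() until gt215_pmu_recv(), run from the work item that gt215_pmu_intr() schedules, matches the waiter's process/message pair and wakes it. A usage sketch with placeholder ids (PROC_FOO and FOO_MSG_PING are not real fuc process or message numbers):

	static int
	example_pmu_ping(struct nvkm_pmu *pmu)
	{
		u32 reply[2];
		int ret = nvkm_pmu_send(pmu, reply, PROC_FOO, FOO_MSG_PING, 0, 0);
		if (ret)
			return ret;
		return reply[0];	/* whatever the ucode handler put in data0 */
	}
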
@@ -18,8 +18,22 @@ struct nvkm_pmu_func {
		u32 size;
	} data;

	void (*reset)(struct nvkm_pmu *);
	int (*init)(struct nvkm_pmu *);
	void (*fini)(struct nvkm_pmu *);
	void (*intr)(struct nvkm_pmu *);
	int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
		    u32 message, u32 data0, u32 data1);
	void (*recv)(struct nvkm_pmu *);
	void (*pgob)(struct nvkm_pmu *, bool);
};

void gt215_pmu_reset(struct nvkm_pmu *);
int gt215_pmu_init(struct nvkm_pmu *);
void gt215_pmu_fini(struct nvkm_pmu *);
void gt215_pmu_intr(struct nvkm_pmu *);
void gt215_pmu_recv(struct nvkm_pmu *);
int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);

void gk110_pmu_pgob(struct nvkm_pmu *, bool);
#endif
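With priv.h turning reset/init/fini/intr/send/recv into per-chip hooks and declaring the gt215 helpers, a chip that reuses the gt215-style ucode only has to fill in the function table, exactly as gf100 through gm107 do above. A sketch of what one more such implementation could look like (gxNNN is a hypothetical chipset name, and gxNNN_pmu_code/_data stand in for a generated fuc image; none of these are real symbols):

	static const struct nvkm_pmu_func
	gxNNN_pmu = {
		.code.data = gxNNN_pmu_code,
		.code.size = sizeof(gxNNN_pmu_code),
		.data.data = gxNNN_pmu_data,
		.data.size = sizeof(gxNNN_pmu_data),
		.reset = gt215_pmu_reset,
		.init = gt215_pmu_init,
		.fini = gt215_pmu_fini,
		.intr = gt215_pmu_intr,
		.send = gt215_pmu_send,
		.recv = gt215_pmu_recv,
	};

	int
	gxNNN_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
	{
		return nvkm_pmu_new_(&gxNNN_pmu, device, index, ppmu);
	}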