drm/nouveau/device: remove pci/platform_device from common struct
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 26c9e8effe
parent c7af0ff0e8
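At a glance, the change drops the pdev/platformdev fields (and the nv_device_is_pci()/nv_device_base()/nv_device_is_cpu_coherent() helpers) from the common structure in favour of a generic device->dev pointer, an nvkm_device_type enum, a cpu_coherent flag, and a bus-specific func->pci() hook. A minimal, illustrative sketch of the caller-side pattern the hunks below convert to; example_probe() and its include list are assumptions for demonstration, not code from the patch:

/*
 * Illustrative sketch only: example_probe() is a made-up caller showing
 * the accessors this commit moves call sites onto.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

#include <core/device.h>
#include <core/pci.h>

static int
example_probe(struct nvkm_device *device)
{
	dma_addr_t addr;
	void *cpu;

	/* the generic struct device now lives directly on nvkm_device */
	cpu = dma_alloc_coherent(device->dev, PAGE_SIZE, &addr, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* bus identity is an enum instead of pdev/platformdev pointer checks */
	if (device->type == NVKM_DEVICE_AGP)
		pr_debug("AGP board\n");

	/* a raw pci_dev is only reachable through the bus-specific hook */
	if (device->func->pci)
		pr_debug("PCI vendor 0x%04x\n",
			 device->func->pci(device)->pdev->vendor);

	dma_free_coherent(device->dev, PAGE_SIZE, cpu, addr);
	return 0;
}

Keeping only the generic struct device in the common structure leaves ownership of the pci_dev/platform_device to the PCI and Tegra backends.
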
@@ -47,13 +47,21 @@ enum nvkm_devidx {
 	NVKM_ENGINE_SEC,
 	NVKM_ENGINE_MSPDEC,
 
-	NVKM_SUBDEV_NR,
+	NVKM_SUBDEV_NR
+};
+
+enum nvkm_device_type {
+	NVKM_DEVICE_PCI,
+	NVKM_DEVICE_AGP,
+	NVKM_DEVICE_PCIE,
+	NVKM_DEVICE_TEGRA,
 };
 
 struct nvkm_device {
 	const struct nvkm_device_func *func;
 	const struct nvkm_device_quirk *quirk;
+	struct device *dev;
+	enum nvkm_device_type type;
 	u64 handle;
 	const char *name;
 	const char *cfgopt;

@@ -63,9 +71,6 @@ struct nvkm_device {
 	struct mutex mutex;
 	int refcount;
 
-	struct pci_dev *pdev;
-	struct platform_device *platformdev;
-
 	void __iomem *pri;
 
 	struct nvkm_event event;

@@ -150,6 +155,7 @@ struct nvkm_device_func {
 	void (*fini)(struct nvkm_device *, bool suspend);
 	resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
 	resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+	bool cpu_coherent;
 };
 
 struct nvkm_device_quirk {

@@ -220,32 +226,6 @@ int nvkm_device_list(u64 *name, int size);
 	_temp; \
 })
 
-static inline bool
-nv_device_is_pci(struct nvkm_device *device)
-{
-	return device->pdev != NULL;
-}
-
-static inline bool
-nv_device_is_cpu_coherent(struct nvkm_device *device)
-{
-	return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
-}
-
-static inline struct device *
-nv_device_base(struct nvkm_device *device)
-{
-	return nv_device_is_pci(device) ? &device->pdev->dev :
-					  &device->platformdev->dev;
-}
-
-struct platform_device;
-
-enum nv_bus_type {
-	NVKM_BUS_PCI,
-	NVKM_BUS_PLATFORM,
-};
-
 void nvkm_device_del(struct nvkm_device **);
 
 struct nvkm_device_oclass {

@@ -174,19 +174,19 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = device->info.chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		if (nv_device_is_pci(nvxx_device(device)))
+		if (nvxx_device(device)->func->pci)
 			getparam->value = dev->pdev->vendor;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		if (nv_device_is_pci(nvxx_device(device)))
+		if (nvxx_device(device)->func->pci)
 			getparam->value = dev->pdev->device;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
-		if (!nv_device_is_pci(nvxx_device(device)))
+		if (!nvxx_device(device)->func->pci)
 			getparam->value = 3;
 		else
 		if (drm_pci_device_is_agp(dev))

@@ -372,12 +372,12 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
 	return len;
 }
 
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+bool nouveau_acpi_rom_supported(struct device *dev)
 {
 	acpi_status status;
 	acpi_handle dhandle, rom_handle;
 
-	dhandle = ACPI_HANDLE(&pdev->dev);
+	dhandle = ACPI_HANDLE(dev);
 	if (!dhandle)
 		return false;
 

@@ -10,7 +10,7 @@ void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
 int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
 static inline bool nouveau_is_optimus(void) { return false; };

@@ -18,7 +18,7 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
 static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
 static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
 #endif

@@ -4,6 +4,8 @@
 #include "nouveau_agp.h"
 #include "nouveau_reg.h"
 
+#include <core/pci.h>
+
 #if __OS_HAS_AGP
 MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
 static int nouveau_agpmode = -1;

@@ -28,6 +30,7 @@ static unsigned long
 get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
 {
 	struct nvif_device *device = &drm->device;
+	struct pci_dev *pdev = nvxx_device(device)->func->pci(nvxx_device(device))->pdev;
 	struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
 	int agpmode = nouveau_agpmode;
 	unsigned long mode = info->mode;

@@ -45,8 +48,8 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
 	while (agpmode == -1 && quirk->hostbridge_vendor) {
 		if (info->id_vendor == quirk->hostbridge_vendor &&
 		    info->id_device == quirk->hostbridge_device &&
-		    nvxx_device(device)->pdev->vendor == quirk->chip_vendor &&
-		    nvxx_device(device)->pdev->device == quirk->chip_device) {
+		    pdev->vendor == quirk->chip_vendor &&
+		    pdev->device == quirk->chip_device) {
 			agpmode = quirk->mode;
 			NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
 				agpmode);

@@ -209,7 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
-	if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
+	if (!nvxx_device(&drm->device)->func->cpu_coherent)
 		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
 	nvbo->page_shift = 12;

@@ -466,8 +466,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_device(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+					   PAGE_SIZE, DMA_TO_DEVICE);
 }
 
 void

@@ -486,8 +486,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_cpu(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
 int

@@ -1487,13 +1487,13 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached)
 		return ttm_dma_populate(ttm_dma, dev->dev);
 

@@ -1552,13 +1552,13 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached) {
 		ttm_dma_unpopulate(ttm_dma, dev->dev);
 		return;

@@ -165,7 +165,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
 	struct nvif_device *device = &drm->device;
 
 	if (sysfs && sysfs->ctrl.priv) {
-		device_remove_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+		device_remove_file(nvxx_device(device)->dev, &dev_attr_pstate);
 		nvif_object_fini(&sysfs->ctrl);
 	}
 

@@ -192,7 +192,7 @@ nouveau_sysfs_init(struct drm_device *dev)
 			       NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
 			       &sysfs->ctrl);
 	if (ret == 0)
-		device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+		device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
 
 	return 0;
 }

@@ -341,7 +341,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	int ret;
 
 	bits = nvxx_mmu(&drm->device)->dma_bits;
-	if (nv_device_is_pci(nvxx_device(&drm->device))) {
+	if (nvxx_device(&drm->device)->func->pci) {
 		if (drm->agp.stat == ENABLED ||
 		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
 			bits = 32;

@@ -210,8 +210,8 @@ nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
 	nv50_chan_destroy(&dmac->base);
 
 	if (dmac->ptr) {
-		struct pci_dev *pdev = nvxx_device(device)->pdev;
-		pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
+		struct device *dev = nvxx_device(device)->dev;
+		dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
 	}
 }
 

@@ -226,8 +226,8 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 
 	mutex_init(&dmac->lock);
 
-	dmac->ptr = pci_alloc_consistent(nvxx_device(device)->pdev,
-					 PAGE_SIZE, &dmac->handle);
+	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
+				       &dmac->handle, GFP_KERNEL);
 	if (!dmac->ptr)
 		return -ENOMEM;
 

@@ -2294,7 +2294,7 @@ nvkm_device_del(struct nvkm_device **pdevice)
 int
 nvkm_device_ctor(const struct nvkm_device_func *func,
 		 const struct nvkm_device_quirk *quirk,
-		 void *dev, enum nv_bus_type type, u64 handle,
+		 struct device *dev, enum nvkm_device_type type, u64 handle,
 		 const char *name, const char *cfg, const char *dbg,
 		 bool detect, bool mmio, u64 subdev_mask,
 		 struct nvkm_device *device)

@@ -2312,16 +2312,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 
 	device->func = func;
 	device->quirk = quirk;
-	switch (type) {
-	case NVKM_BUS_PCI:
-		device->pdev = dev;
-		device->dev = &device->pdev->dev;
-		break;
-	case NVKM_BUS_PLATFORM:
-		device->platformdev = dev;
-		device->dev = &device->platformdev->dev;
-		break;
-	}
+	device->dev = dev;
+	device->type = type;
 	device->handle = handle;
 	device->cfgopt = cfg;
 	device->dbgopt = dbg;

@@ -1621,6 +1621,7 @@ nvkm_device_pci_func = {
 	.fini = nvkm_device_pci_fini,
 	.resource_addr = nvkm_device_pci_resource_addr,
 	.resource_size = nvkm_device_pci_resource_size,
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };
 
 int

@@ -1671,8 +1672,10 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
 	*pdevice = &pdev->device;
 	pdev->pdev = pci_dev;
 
-	return nvkm_device_ctor(&nvkm_device_pci_func, quirk,
-				pci_dev, NVKM_BUS_PCI,
+	return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+				pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+				pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+				NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
 				(u64)pci_domain_nr(pci_dev->bus) << 32 |
 				     pci_dev->bus->number << 16 |
 				     PCI_SLOT(pci_dev->devfn) << 8 |

@@ -41,7 +41,7 @@
 
 int nvkm_device_ctor(const struct nvkm_device_func *,
 		     const struct nvkm_device_quirk *,
-		     void *, enum nv_bus_type type, u64 handle,
+		     struct device *, enum nvkm_device_type, u64 handle,
 		     const char *name, const char *cfg, const char *dbg,
 		     bool detect, bool mmio, u64 subdev_mask,
 		     struct nvkm_device *);

@@ -102,6 +102,7 @@ nvkm_device_tegra_func = {
 	.fini = nvkm_device_tegra_fini,
 	.resource_addr = nvkm_device_tegra_resource_addr,
 	.resource_size = nvkm_device_tegra_resource_size,
+	.cpu_coherent = false,
 };
 
 int

@@ -118,8 +119,8 @@ nvkm_device_tegra_new(struct platform_device *pdev,
 	tdev->pdev = pdev;
 	tdev->irq = -1;
 
-	return nvkm_device_ctor(&nvkm_device_tegra_func, NULL, pdev,
-				NVKM_BUS_PLATFORM, pdev->id, NULL,
+	return nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+				NVKM_DEVICE_TEGRA, pdev->id, NULL,
 				cfg, dbg, detect, mmio, subdev_mask,
 				&tdev->device);
 }

@@ -70,16 +70,22 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
 		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
 		break;
 	default:
-		if (device->pdev) {
-			if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
-				args->v0.platform = NV_DEVICE_INFO_V0_AGP;
-			else
-			if (pci_is_pcie(device->pdev))
-				args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
-			else
-				args->v0.platform = NV_DEVICE_INFO_V0_PCI;
-		} else {
+		switch (device->type) {
+		case NVKM_DEVICE_PCI:
+			args->v0.platform = NV_DEVICE_INFO_V0_PCI;
+			break;
+		case NVKM_DEVICE_AGP:
+			args->v0.platform = NV_DEVICE_INFO_V0_AGP;
+			break;
+		case NVKM_DEVICE_PCIE:
+			args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
+			break;
+		case NVKM_DEVICE_TEGRA:
 			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
-		}
+			break;
+		default:
+			WARN_ON(1);
+			break;
+		}
 		break;
 	}

@@ -190,7 +190,7 @@ nvkm_falcon_init(struct nvkm_engine *engine)
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret == 0) {
 			falcon->code.data = vmemdup(fw->data, fw->size);
 			falcon->code.size = fw->size;

@@ -209,7 +209,7 @@ nvkm_falcon_init(struct nvkm_engine *engine)
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
 			nvkm_error(subdev, "unable to load firmware data\n");
 			return -ENODEV;

@@ -224,7 +224,7 @@ nvkm_falcon_init(struct nvkm_engine *engine)
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
 			nvkm_error(subdev, "unable to load firmware code\n");
 			return -ENODEV;

@@ -1642,7 +1642,7 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
 	}
 
 	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
-	ret = request_firmware(&fw, f, nv_device_base(device));
+	ret = request_firmware(&fw, f, device->dev);
 	if (ret) {
 		nvkm_error(subdev, "failed to load %s\n", fwname);
 		return ret;

@@ -107,7 +107,7 @@ nvkm_xtensa_init(struct nvkm_engine *engine)
 		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
 			 xtensa->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
 			nvkm_warn(subdev, "unable to load firmware %s\n", name);
 			return ret;

@@ -134,7 +134,7 @@ shadow_fw_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 static void *
 shadow_fw_init(struct nvkm_bios *bios, const char *name)
 {
-	struct device *dev = &bios->subdev.device->pdev->dev;
+	struct device *dev = bios->subdev.device->dev;
 	const struct firmware *fw;
 	int ret = request_firmware(&fw, name, dev);
 	if (ret)

@@ -24,10 +24,10 @@
 
 #if defined(CONFIG_ACPI) && defined(CONFIG_X86)
 int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
 #else
 static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev)
+nouveau_acpi_rom_supported(struct device *dev)
 {
 	return false;
 }

@@ -88,7 +88,7 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 static void *
 acpi_init(struct nvkm_bios *bios, const char *name)
 {
-	if (!nouveau_acpi_rom_supported(bios->subdev.device->pdev))
+	if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
 		return ERR_PTR(-ENODEV);
 	return NULL;
 }

@@ -22,6 +22,8 @@
  */
 #include "priv.h"
 
+#include <core/pci.h>
+
 struct priv {
 	struct pci_dev *pdev;
 	void __iomem *rom;

@@ -51,10 +53,16 @@ pcirom_fini(void *data)
 static void *
 pcirom_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = bios->subdev.device->pdev;
+	struct nvkm_device *device = bios->subdev.device;
 	struct priv *priv = NULL;
+	struct pci_dev *pdev;
 	int ret;
 
+	if (device->func->pci)
+		pdev = device->func->pci(device)->pdev;
+	else
+		return ERR_PTR(-ENODEV);
+
 	if (!(ret = pci_enable_rom(pdev))) {
 		if (ret = -ENOMEM,
 		    (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {

@@ -83,10 +91,16 @@ nvbios_pcirom = {
 static void *
 platform_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = bios->subdev.device->pdev;
+	struct nvkm_device *device = bios->subdev.device;
+	struct pci_dev *pdev;
 	struct priv *priv;
 	int ret = -ENOMEM;
 
+	if (device->func->pci)
+		pdev = device->func->pci(device)->pdev;
+	else
+		return ERR_PTR(-ENODEV);
+
 	if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
 		if (ret = -ENODEV,
 		    (priv->rom = pci_platform_rom(pdev, &priv->size)))

@@ -65,7 +65,7 @@ gf100_fb_dtor(struct nvkm_fb *base)
 	struct nvkm_device *device = fb->base.subdev.device;
 
 	if (fb->r100c10_page) {
-		dma_unmap_page(nv_device_base(device), fb->r100c10, PAGE_SIZE,
+		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
 		__free_page(fb->r100c10_page);
 	}

@@ -86,10 +86,9 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
 
 	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (fb->r100c10_page) {
-		fb->r100c10 = dma_map_page(nv_device_base(device),
-					   fb->r100c10_page, 0, PAGE_SIZE,
-					   DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(nv_device_base(device), fb->r100c10))
+		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c10))
 			return -EFAULT;
 	}
 

@@ -234,7 +234,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
 	struct nvkm_device *device = fb->base.subdev.device;
 
 	if (fb->r100c08_page) {
-		dma_unmap_page(nv_device_base(device), fb->r100c08, PAGE_SIZE,
+		dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
 		__free_page(fb->r100c08_page);
 	}

@@ -265,10 +265,9 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
 
 	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (fb->r100c08_page) {
-		fb->r100c08 = dma_map_page(nv_device_base(device),
-					   fb->r100c08_page, 0, PAGE_SIZE,
-					   DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(nv_device_base(device), fb->r100c08))
+		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c08))
 			return -EFAULT;
 	} else {
 		nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");

@@ -186,7 +186,7 @@ gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_dma *node = (void *)_node;
 	struct gk20a_instmem *imem = _node->imem;
-	struct device *dev = nv_device_base(imem->base.subdev.device);
+	struct device *dev = imem->base.subdev.device->dev;
 
 	if (unlikely(!node->cpuaddr))
 		return;

@@ -110,8 +110,8 @@ nv04_mmu_dtor(struct nvkm_mmu *base)
 		nvkm_vm_ref(NULL, &mmu->vm, NULL);
 	}
 	if (mmu->nullp) {
-		pci_free_consistent(device->pdev, 16 * 1024,
-				    mmu->nullp, mmu->null);
+		dma_free_coherent(device->dev, 16 * 1024,
+				  mmu->nullp, mmu->null);
 	}
 	return mmu;
 }

@@ -133,7 +133,7 @@ nv41_mmu = {
 int
 nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
 {
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+	if (device->type == NVKM_DEVICE_AGP ||
 	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
 		return nv04_mmu_new(device, index, pmmu);
 

@@ -165,7 +165,8 @@ nv44_mmu_oneinit(struct nvkm_mmu *base)
 	struct nvkm_device *device = mmu->base.subdev.device;
 	int ret;
 
-	mmu->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &mmu->null);
+	mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
+					&mmu->null, GFP_KERNEL);
 	if (!mmu->nullp) {
 		nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
 		mmu->null = 0;

@@ -227,7 +228,7 @@ nv44_mmu = {
 int
 nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
 {
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+	if (device->type == NVKM_DEVICE_AGP ||
 	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
 		return nv04_mmu_new(device, index, pmmu);
 

@@ -95,7 +95,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 	acpi_handle handle;
 	int rev;
 
-	handle = ACPI_HANDLE(nv_device_base(device));
+	handle = ACPI_HANDLE(device->dev);
 	if (!handle)
 		return false;
 