/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

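/*
 * Illustrative use of the helpers above (normally wrapped by the
 * RREG32()/WREG32() macros); mmSOME_REG is a placeholder, not a real
 * register name:
 *
 *	tmp = amdgpu_mm_rreg(adev, mmSOME_REG, 0);
 *	amdgpu_mm_wreg(adev, mmSOME_REG, tmp | 1, 0);
 *
 * Offsets beyond the mapped MMIO BAR fall back to the indexed
 * mmMM_INDEX/mmMM_DATA pair under the mmio_idx_lock spinlock.
 */
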
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

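/*
 * Sketch of a typical caller (illustrative only): a ring backend commits
 * new packets and then publishes its write pointer through its doorbell,
 * roughly along the lines of:
 *
 *	amdgpu_mm_wdoorbell(adev, ring->doorbell_index,
 *			    lower_32_bits(ring->wptr));
 */
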
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

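/*
 * The register list is a flat array of {offset, and_mask, or_mask}
 * triplets; an and_mask of 0xffffffff writes or_mask directly, anything
 * else is a read-modify-write that clears the and_mask bits first.
 * Hypothetical example (mmFOO/mmBAR are placeholders):
 *
 *	static const u32 golden_settings[] = {
 *		mmFOO, 0xffffffff, 0x00000100,
 *		mmBAR, 0x0000000f, 0x00000002,
 *	};
 *	amdgpu_program_register_sequence(adev, golden_settings,
 *					 ARRAY_SIZE(golden_settings));
 */
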
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

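/*
 * Worked example (illustrative numbers): with a 2 MB doorbell BAR and
 * 1024 doorbells (4 KB) reserved for amdgpu's own rings, amdkfd is told
 * aperture_base = doorbell.base, aperture_size = 2 MB and
 * start_offset = 4 KB, i.e. it owns everything past the amdgpu slots.
 */
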
/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset * 8; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

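/*
 * Sketch of how a writeback slot is typically consumed (illustrative):
 *
 *	u32 wb;
 *
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		u32 val = le32_to_cpu(adev->wb.wb[wb]);	/\* CPU-side read *\/
 *		...
 *		amdgpu_wb_free(adev, wb);
 *	}
 *
 * amdgpu_wb_get() hands back a dword offset; the GPU writes through
 * gpu_addr while the CPU polls the matching entry in adev->wb.wb.
 */
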
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		mc->gart_start = mc->vram_end + 1;
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

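/*
 * Example layout produced by the two placement helpers above
 * (illustrative numbers): with 8 GB of VRAM placed at offset 0 and a
 * 256 MB GART, the space above vram_end is the larger gap, so:
 *
 *	VRAM: 0x0000000000000000 - 0x00000001FFFFFFFF
 *	GTT:  0x0000000200000000 - 0x000000020FFFFFFF
 *
 * i.e. the GART aperture is placed directly after VRAM.
 */
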
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if need or false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;

}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct atom_context *ctx = adev->mode_info.atom_context;

	return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
}

static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
		   NULL);

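/*
 * The attribute declared above shows up in the GPU's PCI sysfs directory;
 * reading it (path shown only for illustration) returns the parsed VBIOS
 * version string, e.g.:
 *
 *	$ cat /sys/bus/pci/devices/0000:01:00.0/vbios_version
 *	113-D0000100-101
 */
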
/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
	device_remove_file(adev->dev, &dev_attr_vbios_version);
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
	int ret;

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}

	ret = device_create_file(adev->dev, &dev_attr_vbios_version);
	if (ret) {
		DRM_ERROR("Failed to create device file for VBIOS version\n");
		return ret;
	}

	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (!is_power_of_2(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

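/*
 * Example (illustrative): booting with amdgpu.vm_size=64 and
 * amdgpu.vm_block_size=12 passes both validators above, since 64 is a
 * power of two within 1..1024 and 64 * 1024 >= (1 << 12); a value that
 * fails a check is simply reset to -1 so the per-asic default is used.
 */
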
/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

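/*
 * Example callers (illustrative): gating clocks and power on the GFX
 * block would look roughly like:
 *
 *	amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *				     AMD_CG_STATE_GATE);
 *	amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *				     AMD_PG_STATE_GATE);
 */
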
void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

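/*
 * Example (illustrative): a caller that requires GMC 8.1 or newer could
 * gate a feature on the comparison helper above, e.g.:
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC, 8, 1) == 0) {
 *		// the GMC IP on this asic is at least version 8.1
 *	}
 */
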
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

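/*
 * The amdgpu.virtual_display parameter parsed above is a semicolon
 * separated list of "<pci-address>,<num_crtc>" entries ("all" matches
 * every device). Illustrative invocation:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 *
 * which enables two virtual CRTCs on that device.
 */
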
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
|
|
|
|
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
|
2017-05-03 09:40:17 +08:00
|
|
|
DRM_ERROR("disabled ip block: %d <%s>\n",
|
|
|
|
i, adev->ip_blocks[i].version->funcs->name);
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.valid = false;
|
2015-04-21 04:55:21 +08:00
|
|
|
} else {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].version->funcs->early_init) {
|
|
|
|
r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r == -ENOENT) {
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.valid = false;
|
2015-12-08 06:02:53 +08:00
|
|
|
} else if (r) {
|
2016-10-14 05:41:13 +08:00
|
|
|
DRM_ERROR("early_init of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2015-04-21 04:55:21 +08:00
|
|
|
return r;
|
2015-12-08 06:02:53 +08:00
|
|
|
} else {
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.valid = true;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-07-11 01:59:44 +08:00
|
|
|
} else {
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.valid = true;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-02 18:32:07 +08:00
|
|
|
adev->cg_flags &= amdgpu_cg_mask;
|
|
|
|
adev->pg_flags &= amdgpu_pg_mask;
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
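/*
 * amdgpu_init - run sw_init and hw_init for all enabled IP blocks
 *
 * Two passes: sw_init for every valid block first, then hw_init. The GMC
 * block is hw-initialized during the first pass (together with the VRAM
 * scratch page, writeback buffers and, for SR-IOV, the static CSA) because
 * later blocks need GPU memory to be usable.
 */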
static int amdgpu_init(struct amdgpu_device *adev)
|
|
|
|
{
|
|
|
|
int i, r;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2015-04-21 04:55:21 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
2016-10-14 05:41:13 +08:00
|
|
|
DRM_ERROR("sw_init of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2015-04-21 04:55:21 +08:00
|
|
|
return r;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.sw = true;
|
2015-04-21 04:55:21 +08:00
|
|
|
/* need to do gmc hw init early so we can allocate gpu mem */
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
|
2015-04-21 04:55:21 +08:00
|
|
|
r = amdgpu_vram_scratch_init(adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
|
2015-04-21 04:55:21 +08:00
|
|
|
return r;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("hw_init %d failed %d\n", i, r);
|
2015-04-21 04:55:21 +08:00
|
|
|
return r;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
r = amdgpu_wb_init(adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("amdgpu_wb_init failed %d\n", r);
|
2015-04-21 04:55:21 +08:00
|
|
|
return r;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.hw = true;
|
2017-01-09 15:54:32 +08:00
|
|
|
|
|
|
|
/* right after GMC hw init, we create CSA */
|
|
|
|
if (amdgpu_sriov_vf(adev)) {
|
|
|
|
r = amdgpu_allocate_static_csa(adev);
|
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("allocate CSA failed %d\n", r);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.sw)
|
2015-04-21 04:55:21 +08:00
|
|
|
continue;
|
|
|
|
/* gmc hw init is done early */
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
|
2015-04-21 04:55:21 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
2016-10-14 05:41:13 +08:00
|
|
|
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2015-04-21 04:55:21 +08:00
|
|
|
return r;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.hw = true;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
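/*
 * The "reset magic" is a small signature copied from the CPU-visible GART
 * mapping at late-init time; after a GPU reset the same location is compared
 * against the saved copy to decide whether VRAM contents were lost.
 */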
static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}
|
|
|
|
|
2017-05-25 12:35:25 +08:00
|
|
|
static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
|
2015-04-21 04:55:21 +08:00
|
|
|
{
|
|
|
|
int i = 0, r;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2015-04-21 04:55:21 +08:00
|
|
|
continue;
|
2016-10-08 02:48:18 +08:00
|
|
|
/* skip CG for VCE/UVD, it's handled specially */
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
|
|
|
|
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
|
2016-10-08 02:48:18 +08:00
|
|
|
/* enable clockgating to save power */
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
|
|
|
|
AMD_CG_STATE_GATE);
|
2016-10-08 02:48:18 +08:00
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2016-10-08 02:48:18 +08:00
|
|
|
return r;
|
|
|
|
}
|
2016-10-07 21:31:37 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
2017-05-25 12:35:25 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
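/*
 * amdgpu_late_init - run late_init for all enabled IP blocks
 *
 * Also schedules the delayed clockgating work (amdgpu_late_set_cg_state)
 * and records the VRAM reset magic so a later reset can tell whether
 * memory contents survived.
 */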
static int amdgpu_late_init(struct amdgpu_device *adev)
|
|
|
|
{
|
|
|
|
int i = 0, r;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
|
|
|
continue;
|
|
|
|
if (adev->ip_blocks[i].version->funcs->late_init) {
|
|
|
|
r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
|
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("late_init of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
adev->ip_blocks[i].status.late_initialized = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mod_delayed_work(system_wq, &adev->late_init_work,
|
|
|
|
msecs_to_jiffies(AMDGPU_RESUME_MS));
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2017-05-15 14:20:00 +08:00
|
|
|
amdgpu_fill_reset_magic(adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
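/*
 * amdgpu_fini - tear down all IP blocks in reverse init order
 *
 * The SMC block is ungated and shut down first so that power management is
 * quiesced before the remaining blocks run hw_fini, sw_fini and late_fini.
 */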
static int amdgpu_fini(struct amdgpu_device *adev)
|
|
|
|
{
|
|
|
|
int i, r;
|
|
|
|
|
2016-10-13 23:22:17 +08:00
|
|
|
/* need to disable SMC first */
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.hw)
|
2016-10-13 23:22:17 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
|
2016-10-13 23:22:17 +08:00
|
|
|
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
|
|
|
|
AMD_CG_STATE_UNGATE);
|
2016-10-13 23:22:17 +08:00
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2016-10-13 23:22:17 +08:00
|
|
|
return r;
|
|
|
|
}
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
|
2016-10-13 23:22:17 +08:00
|
|
|
/* XXX handle errors */
|
|
|
|
if (r) {
|
|
|
|
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2016-10-13 23:22:17 +08:00
|
|
|
}
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.hw = false;
|
2016-10-13 23:22:17 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.hw)
|
2015-04-21 04:55:21 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
|
2015-04-21 04:55:21 +08:00
|
|
|
amdgpu_wb_fini(adev);
|
|
|
|
amdgpu_vram_scratch_fini(adev);
|
|
|
|
}
|
2016-11-24 21:44:44 +08:00
|
|
|
|
|
|
|
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
|
|
|
|
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
|
|
|
|
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
|
|
|
|
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
|
|
|
|
AMD_CG_STATE_UNGATE);
|
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
|
|
|
return r;
|
|
|
|
}
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2016-11-24 21:44:44 +08:00
|
|
|
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
/* XXX handle errors */
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
2016-10-14 05:41:13 +08:00
|
|
|
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2016-11-24 21:44:44 +08:00
|
|
|
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.hw = false;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.sw)
|
2015-04-21 04:55:21 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
/* XXX handle errors */
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
2016-10-14 05:41:13 +08:00
|
|
|
DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2016-10-14 05:41:13 +08:00
|
|
|
adev->ip_blocks[i].status.sw = false;
|
|
|
|
adev->ip_blocks[i].status.valid = false;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
2016-05-19 14:36:34 +08:00
|
|
|
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.late_initialized)
|
2016-10-03 05:06:44 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].version->funcs->late_fini)
|
|
|
|
adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
|
|
|
|
adev->ip_blocks[i].status.late_initialized = false;
|
2016-05-19 14:36:34 +08:00
|
|
|
}
|
|
|
|
|
2017-09-15 15:34:52 +08:00
|
|
|
if (amdgpu_sriov_vf(adev))
|
2017-01-12 15:14:36 +08:00
|
|
|
amdgpu_virt_release_full_gpu(adev, false);
|
2017-01-09 15:54:32 +08:00
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
static void amdgpu_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_late_set_cg_state(adev);
}
|
|
|
|
|
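/*
 * amdgpu_suspend - suspend all IP blocks in reverse init order
 *
 * Clockgating is ungated (SMC first) before each block's suspend callback
 * runs; under SR-IOV, full GPU access is requested for the duration and
 * released again afterwards.
 */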
2016-12-06 23:38:29 +08:00
|
|
|
int amdgpu_suspend(struct amdgpu_device *adev)
|
2015-04-21 04:55:21 +08:00
|
|
|
{
|
|
|
|
int i, r;
|
|
|
|
|
2017-01-18 12:47:55 +08:00
|
|
|
if (amdgpu_sriov_vf(adev))
|
|
|
|
amdgpu_virt_request_full_gpu(adev, false);
|
|
|
|
|
2016-02-26 10:45:25 +08:00
|
|
|
/* ungate SMC block first */
|
|
|
|
r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
|
|
|
|
AMD_CG_STATE_UNGATE);
|
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2015-04-21 04:55:21 +08:00
|
|
|
continue;
|
|
|
|
/* ungate blocks so that suspend can properly shut them down */
|
2016-02-26 10:45:25 +08:00
|
|
|
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
|
|
|
|
AMD_CG_STATE_UNGATE);
|
2016-02-26 10:45:25 +08:00
|
|
|
if (r) {
|
2016-10-14 05:41:13 +08:00
|
|
|
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2016-02-26 10:45:25 +08:00
|
|
|
}
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
/* XXX handle errors */
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->suspend(adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
/* XXX handle errors */
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
2016-10-14 05:41:13 +08:00
|
|
|
DRM_ERROR("suspend of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
2017-01-18 12:47:55 +08:00
|
|
|
if (amdgpu_sriov_vf(adev))
|
|
|
|
amdgpu_virt_release_full_gpu(adev, false);
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
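/*
 * The SR-IOV reinit helpers below re-run hw_init for the already-initialized
 * IP blocks after a VF reset, in a fixed order: GMC, COMMON and IH early,
 * then SMC, DCE, GFX, SDMA, UVD and VCE once the early blocks are back up.
 */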
2017-02-09 11:55:49 +08:00
|
|
|
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
|
2017-01-23 14:22:08 +08:00
|
|
|
{
|
|
|
|
int i, r;
|
|
|
|
|
2017-04-26 12:00:49 +08:00
|
|
|
static enum amd_ip_block_type ip_order[] = {
|
|
|
|
AMD_IP_BLOCK_TYPE_GMC,
|
|
|
|
AMD_IP_BLOCK_TYPE_COMMON,
|
|
|
|
AMD_IP_BLOCK_TYPE_IH,
|
|
|
|
};
|
2017-01-23 14:22:08 +08:00
|
|
|
|
2017-04-26 12:00:49 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
|
|
|
|
int j;
|
|
|
|
struct amdgpu_ip_block *block;
|
2017-01-23 14:22:08 +08:00
|
|
|
|
2017-04-26 12:00:49 +08:00
|
|
|
for (j = 0; j < adev->num_ip_blocks; j++) {
|
|
|
|
block = &adev->ip_blocks[j];
|
|
|
|
|
|
|
|
if (block->version->type != ip_order[i] ||
|
|
|
|
!block->status.valid)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
r = block->version->funcs->hw_init(adev);
|
|
|
|
DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
|
2017-01-23 14:22:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-09 11:55:49 +08:00
|
|
|
static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
|
2017-01-23 14:22:08 +08:00
|
|
|
{
|
|
|
|
int i, r;
|
|
|
|
|
2017-04-26 12:00:49 +08:00
|
|
|
static enum amd_ip_block_type ip_order[] = {
|
|
|
|
AMD_IP_BLOCK_TYPE_SMC,
|
|
|
|
AMD_IP_BLOCK_TYPE_DCE,
|
|
|
|
AMD_IP_BLOCK_TYPE_GFX,
|
|
|
|
AMD_IP_BLOCK_TYPE_SDMA,
|
2017-06-15 20:07:36 +08:00
|
|
|
AMD_IP_BLOCK_TYPE_UVD,
|
|
|
|
AMD_IP_BLOCK_TYPE_VCE
|
2017-04-26 12:00:49 +08:00
|
|
|
};
|
2017-01-23 14:22:08 +08:00
|
|
|
|
2017-04-26 12:00:49 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
|
|
|
|
int j;
|
|
|
|
struct amdgpu_ip_block *block;
|
2017-01-23 14:22:08 +08:00
|
|
|
|
2017-04-26 12:00:49 +08:00
|
|
|
for (j = 0; j < adev->num_ip_blocks; j++) {
|
|
|
|
block = &adev->ip_blocks[j];
|
|
|
|
|
|
|
|
if (block->version->type != ip_order[i] ||
|
|
|
|
!block->status.valid)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
r = block->version->funcs->hw_init(adev);
|
|
|
|
DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
|
2017-01-23 14:22:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
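/*
 * Resume is split into two phases: phase 1 brings back COMMON, GMC and IH
 * (the blocks everything else depends on), phase 2 resumes the rest.
 */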
2017-05-05 10:33:33 +08:00
|
|
|
static int amdgpu_resume_phase1(struct amdgpu_device *adev)
|
2015-04-21 04:55:21 +08:00
|
|
|
{
|
|
|
|
int i, r;
|
|
|
|
|
2017-01-23 14:22:08 +08:00
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
|
|
|
continue;
|
|
|
|
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
|
|
|
|
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
|
2017-05-05 10:33:33 +08:00
|
|
|
adev->ip_blocks[i].version->type ==
|
|
|
|
AMD_IP_BLOCK_TYPE_IH) {
|
|
|
|
r = adev->ip_blocks[i].version->funcs->resume(adev);
|
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("resume of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
|
|
|
return r;
|
|
|
|
}
|
2017-01-23 14:22:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-05-05 10:33:33 +08:00
|
|
|
static int amdgpu_resume_phase2(struct amdgpu_device *adev)
|
2015-04-21 04:55:21 +08:00
|
|
|
{
|
|
|
|
int i, r;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2015-04-21 04:55:21 +08:00
|
|
|
continue;
|
2017-05-05 10:33:33 +08:00
|
|
|
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
|
|
|
|
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
|
|
|
|
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
|
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
r = adev->ip_blocks[i].version->funcs->resume(adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
2016-10-14 05:41:13 +08:00
|
|
|
DRM_ERROR("resume of IP block <%s> failed %d\n",
|
|
|
|
adev->ip_blocks[i].version->funcs->name, r);
|
2015-04-21 04:55:21 +08:00
|
|
|
return r;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
static int amdgpu_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_resume_phase2(adev);

	return r;
}
|
|
|
|
|
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (adev->is_atom_fw) {
		if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	} else {
		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	}
}
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
/**
|
|
|
|
* amdgpu_device_init - initialize the driver
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @ddev: drm dev pointer
|
|
|
|
* @pdev: pci dev pointer
|
|
|
|
* @flags: driver flags
|
|
|
|
*
|
|
|
|
* Initializes the driver info and hw (all asics).
|
|
|
|
* Returns 0 for success or an error on failure.
|
|
|
|
* Called at driver startup.
|
|
|
|
*/
|
|
|
|
int amdgpu_device_init(struct amdgpu_device *adev,
|
|
|
|
struct drm_device *ddev,
|
|
|
|
struct pci_dev *pdev,
|
|
|
|
uint32_t flags)
|
|
|
|
{
|
|
|
|
int r, i;
|
|
|
|
bool runtime = false;
|
2016-08-18 05:49:27 +08:00
|
|
|
u32 max_MBps;
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
adev->shutdown = false;
|
|
|
|
adev->dev = &pdev->dev;
|
|
|
|
adev->ddev = ddev;
|
|
|
|
adev->pdev = pdev;
|
|
|
|
adev->flags = flags;
|
2015-07-22 11:29:01 +08:00
|
|
|
adev->asic_type = flags & AMD_ASIC_MASK;
|
2015-04-21 04:55:21 +08:00
|
|
|
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
|
2017-07-07 17:56:59 +08:00
|
|
|
adev->mc.gart_size = 512 * 1024 * 1024;
|
2015-04-21 04:55:21 +08:00
|
|
|
adev->accel_working = false;
|
|
|
|
adev->num_rings = 0;
|
|
|
|
adev->mman.buffer_funcs = NULL;
|
|
|
|
adev->mman.buffer_funcs_ring = NULL;
|
|
|
|
adev->vm_manager.vm_pte_funcs = NULL;
|
2016-02-09 00:37:38 +08:00
|
|
|
adev->vm_manager.vm_pte_num_rings = 0;
|
2015-04-21 04:55:21 +08:00
|
|
|
adev->gart.gart_funcs = NULL;
|
2016-10-25 20:00:45 +08:00
|
|
|
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
adev->smc_rreg = &amdgpu_invalid_rreg;
|
|
|
|
adev->smc_wreg = &amdgpu_invalid_wreg;
|
|
|
|
adev->pcie_rreg = &amdgpu_invalid_rreg;
|
|
|
|
adev->pcie_wreg = &amdgpu_invalid_wreg;
|
2016-08-31 13:23:25 +08:00
|
|
|
adev->pciep_rreg = &amdgpu_invalid_rreg;
|
|
|
|
adev->pciep_wreg = &amdgpu_invalid_wreg;
|
2015-04-21 04:55:21 +08:00
|
|
|
adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
|
|
|
|
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
|
|
|
|
adev->didt_rreg = &amdgpu_invalid_rreg;
|
|
|
|
adev->didt_wreg = &amdgpu_invalid_wreg;
|
2016-06-08 12:47:41 +08:00
|
|
|
adev->gc_cac_rreg = &amdgpu_invalid_rreg;
|
|
|
|
adev->gc_cac_wreg = &amdgpu_invalid_wreg;
|
2015-04-21 04:55:21 +08:00
|
|
|
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
|
|
|
|
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
|
|
|
|
|
2016-06-08 12:47:41 +08:00
|
|
|
|
2015-06-06 03:04:33 +08:00
|
|
|
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
|
|
|
|
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
|
|
|
|
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
/* mutex initialization is done here so we
 * can call these functions without running into locking issues */
|
|
|
|
atomic_set(&adev->irq.ih.lock, 0);
|
2017-03-04 07:37:23 +08:00
|
|
|
mutex_init(&adev->firmware.mutex);
|
2015-04-21 04:55:21 +08:00
|
|
|
mutex_init(&adev->pm.mutex);
|
|
|
|
mutex_init(&adev->gfx.gpu_clock_mutex);
|
|
|
|
mutex_init(&adev->srbm_mutex);
|
|
|
|
mutex_init(&adev->grbm_idx_mutex);
|
|
|
|
mutex_init(&adev->mn_lock);
|
2017-09-28 21:47:32 +08:00
|
|
|
mutex_init(&adev->virt.vf_errors.lock);
|
2015-04-21 04:55:21 +08:00
|
|
|
hash_init(adev->mn_hash);
|
|
|
|
|
|
|
|
amdgpu_check_arguments(adev);
|
|
|
|
|
|
|
|
spin_lock_init(&adev->mmio_idx_lock);
|
|
|
|
spin_lock_init(&adev->smc_idx_lock);
|
|
|
|
spin_lock_init(&adev->pcie_idx_lock);
|
|
|
|
spin_lock_init(&adev->uvd_ctx_idx_lock);
|
|
|
|
spin_lock_init(&adev->didt_idx_lock);
|
2016-06-08 12:47:41 +08:00
|
|
|
spin_lock_init(&adev->gc_cac_idx_lock);
|
2017-07-04 09:21:50 +08:00
|
|
|
spin_lock_init(&adev->se_cac_idx_lock);
|
2015-04-21 04:55:21 +08:00
|
|
|
spin_lock_init(&adev->audio_endpt_idx_lock);
|
2016-08-18 05:49:27 +08:00
|
|
|
spin_lock_init(&adev->mm_stats.lock);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2016-08-17 11:41:30 +08:00
|
|
|
INIT_LIST_HEAD(&adev->shadow_list);
|
|
|
|
mutex_init(&adev->shadow_list_lock);
|
|
|
|
|
2016-08-30 16:13:10 +08:00
|
|
|
INIT_LIST_HEAD(&adev->gtt_list);
|
|
|
|
spin_lock_init(&adev->gtt_list_lock);
|
|
|
|
|
2017-03-07 05:27:55 +08:00
|
|
|
INIT_LIST_HEAD(&adev->ring_lru_list);
|
|
|
|
spin_lock_init(&adev->ring_lru_list_lock);
|
|
|
|
|
2017-05-25 12:35:25 +08:00
|
|
|
INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
|
|
|
|
|
2017-06-09 02:58:05 +08:00
|
|
|
/* Registers mapping */
|
|
|
|
/* TODO: block userspace mapping of io register */
|
2016-01-21 19:08:55 +08:00
|
|
|
if (adev->asic_type >= CHIP_BONAIRE) {
|
|
|
|
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
|
|
|
|
adev->rmmio_size = pci_resource_len(adev->pdev, 5);
|
|
|
|
} else {
|
|
|
|
adev->rmmio_base = pci_resource_start(adev->pdev, 2);
|
|
|
|
adev->rmmio_size = pci_resource_len(adev->pdev, 2);
|
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
|
|
|
|
if (adev->rmmio == NULL) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
|
|
|
|
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
|
|
|
|
|
2017-06-08 17:15:16 +08:00
|
|
|
/* doorbell bar mapping */
|
|
|
|
amdgpu_doorbell_init(adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
/* io port mapping */
|
|
|
|
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
|
|
|
|
if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
|
|
|
|
adev->rio_mem_size = pci_resource_len(adev->pdev, i);
|
|
|
|
adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (adev->rio_mem == NULL)
|
2017-01-04 21:06:58 +08:00
|
|
|
DRM_INFO("PCI I/O BAR is not found.\n");
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
/* early init functions */
|
|
|
|
r = amdgpu_early_init(adev);
|
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
|
|
|
|
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
|
|
|
|
/* this will fail for cards that aren't VGA class devices, just
|
|
|
|
* ignore it */
|
|
|
|
vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
|
|
|
|
|
|
|
|
if (amdgpu_runtime_pm == 1)
|
|
|
|
runtime = true;
|
2016-04-26 01:12:18 +08:00
|
|
|
if (amdgpu_device_is_px(ddev))
|
2015-04-21 04:55:21 +08:00
|
|
|
runtime = true;
|
2017-03-11 04:23:45 +08:00
|
|
|
if (!pci_is_thunderbolt_attached(adev->pdev))
|
|
|
|
vga_switcheroo_register_client(adev->pdev,
|
|
|
|
&amdgpu_switcheroo_ops, runtime);
|
2015-04-21 04:55:21 +08:00
|
|
|
if (runtime)
|
|
|
|
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
|
|
|
|
|
|
|
|
/* Read BIOS */
|
2016-06-04 06:21:41 +08:00
|
|
|
if (!amdgpu_get_bios(adev)) {
|
|
|
|
r = -EINVAL;
|
|
|
|
goto failed;
|
|
|
|
}
|
2016-12-15 04:52:45 +08:00
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
r = amdgpu_atombios_init(adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
|
2016-06-04 06:21:41 +08:00
|
|
|
goto failed;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2016-03-31 13:26:59 +08:00
|
|
|
/* detect if we are with an SRIOV vbios */
|
|
|
|
amdgpu_device_detect_sriov_bios(adev);
|
2016-06-11 14:51:32 +08:00
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
/* Post card if necessary */
|
2016-09-14 19:38:08 +08:00
|
|
|
if (amdgpu_vpost_needed(adev)) {
|
2015-04-21 04:55:21 +08:00
|
|
|
if (!adev->bios) {
|
2016-09-14 19:38:08 +08:00
|
|
|
dev_err(adev->dev, "no vBIOS found\n");
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
|
2016-06-04 06:21:41 +08:00
|
|
|
r = -EINVAL;
|
|
|
|
goto failed;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
2016-09-14 19:38:08 +08:00
|
|
|
DRM_INFO("GPU posting now...\n");
|
2016-03-31 13:26:59 +08:00
|
|
|
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
|
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "gpu post error!\n");
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
|
2016-03-31 13:26:59 +08:00
|
|
|
goto failed;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
DRM_INFO("GPU post is not needed\n");
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
2017-07-10 22:43:10 +08:00
|
|
|
if (adev->is_atom_fw) {
|
|
|
|
/* Initialize clocks */
|
|
|
|
r = amdgpu_atomfirmware_get_clock_info(adev);
|
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
|
2017-07-10 22:43:10 +08:00
|
|
|
goto failed;
|
|
|
|
}
|
|
|
|
} else {
|
2016-09-24 04:23:41 +08:00
|
|
|
/* Initialize clocks */
|
|
|
|
r = amdgpu_atombios_get_clock_info(adev);
|
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
|
2017-06-24 01:55:15 +08:00
|
|
|
goto failed;
|
2016-09-24 04:23:41 +08:00
|
|
|
}
|
|
|
|
/* init i2c buses */
|
|
|
|
amdgpu_atombios_i2c_init(adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
/* Fence driver */
|
|
|
|
r = amdgpu_fence_driver_init(adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
|
2016-06-04 06:21:41 +08:00
|
|
|
goto failed;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
/* init the mode config */
|
|
|
|
drm_mode_config_init(adev->ddev);
|
|
|
|
|
|
|
|
r = amdgpu_init(adev);
|
|
|
|
if (r) {
|
2015-12-08 06:02:53 +08:00
|
|
|
dev_err(adev->dev, "amdgpu_init failed\n");
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
|
2015-04-21 04:55:21 +08:00
|
|
|
amdgpu_fini(adev);
|
2016-06-04 06:21:41 +08:00
|
|
|
goto failed;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
adev->accel_working = true;
|
|
|
|
|
2017-06-01 21:42:59 +08:00
|
|
|
amdgpu_vm_check_compute_bug(adev);
|
|
|
|
|
2016-08-18 05:49:27 +08:00
|
|
|
/* Initialize the buffer migration limit. */
|
|
|
|
if (amdgpu_moverate >= 0)
|
|
|
|
max_MBps = amdgpu_moverate;
|
|
|
|
else
|
|
|
|
max_MBps = 8; /* Allow 8 MB/s. */
|
|
|
|
/* Get a log2 for easy divisions. */
|
|
|
|
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
r = amdgpu_ib_pool_init(adev);
|
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
|
2016-06-04 06:21:41 +08:00
|
|
|
goto failed;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
r = amdgpu_ib_ring_tests(adev);
|
|
|
|
if (r)
|
|
|
|
DRM_ERROR("ib ring test failed (%d).\n", r);
|
|
|
|
|
2017-02-08 17:38:13 +08:00
|
|
|
amdgpu_fbdev_init(adev);
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
r = amdgpu_gem_debugfs_init(adev);
|
2017-02-09 13:42:27 +08:00
|
|
|
if (r)
|
2015-04-21 04:55:21 +08:00
|
|
|
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
|
|
|
|
|
|
|
|
r = amdgpu_debugfs_regs_init(adev);
|
2017-02-09 13:42:27 +08:00
|
|
|
if (r)
|
2015-04-21 04:55:21 +08:00
|
|
|
DRM_ERROR("registering register debugfs failed (%d).\n", r);
|
|
|
|
|
2017-05-10 23:04:06 +08:00
|
|
|
r = amdgpu_debugfs_test_ib_ring_init(adev);
|
|
|
|
if (r)
|
|
|
|
DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
|
|
|
|
|
|
|
|
r = amdgpu_debugfs_firmware_init(adev);
|
2017-02-09 13:42:27 +08:00
|
|
|
if (r)
|
|
|
|
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
|
|
|
|
|
2017-08-23 00:31:43 +08:00
|
|
|
r = amdgpu_debugfs_vbios_dump_init(adev);
|
|
|
|
if (r)
|
|
|
|
DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
if ((amdgpu_testing & 1)) {
|
|
|
|
if (adev->accel_working)
|
|
|
|
amdgpu_test_moves(adev);
|
|
|
|
else
|
|
|
|
DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
|
|
|
|
}
|
|
|
|
if (amdgpu_benchmarking) {
|
|
|
|
if (adev->accel_working)
|
|
|
|
amdgpu_benchmark(adev, amdgpu_benchmarking);
|
|
|
|
else
|
|
|
|
DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/* enable clockgating, etc. after ib tests, etc. since some blocks require
|
|
|
|
* explicit gating rather than handling it automatically.
|
|
|
|
*/
|
|
|
|
r = amdgpu_late_init(adev);
|
2015-12-08 06:02:53 +08:00
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "amdgpu_late_init failed\n");
|
2017-09-28 21:47:32 +08:00
|
|
|
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
|
2016-06-04 06:21:41 +08:00
|
|
|
goto failed;
|
2015-12-08 06:02:53 +08:00
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
return 0;
|
2016-06-04 06:21:41 +08:00
|
|
|
|
|
|
|
failed:
|
2017-06-24 01:55:15 +08:00
|
|
|
amdgpu_vf_error_trans_all(adev);
|
2016-06-04 06:21:41 +08:00
|
|
|
if (runtime)
|
|
|
|
vga_switcheroo_fini_domain_pm_ops(adev->dev);
|
|
|
|
return r;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_device_fini - tear down the driver
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
*
|
|
|
|
* Tear down the driver info (all asics).
|
|
|
|
* Called at driver shutdown.
|
|
|
|
*/
|
|
|
|
void amdgpu_device_fini(struct amdgpu_device *adev)
|
|
|
|
{
|
|
|
|
int r;
|
|
|
|
|
|
|
|
DRM_INFO("amdgpu: finishing device.\n");
|
|
|
|
adev->shutdown = true;
|
2017-04-25 16:47:42 +08:00
|
|
|
if (adev->mode_info.mode_config_initialized)
|
|
|
|
drm_crtc_force_disable_all(adev->ddev);
|
2015-04-21 04:55:21 +08:00
|
|
|
/* evict vram memory */
|
|
|
|
amdgpu_bo_evict_vram(adev);
|
|
|
|
amdgpu_ib_pool_fini(adev);
|
|
|
|
amdgpu_fence_driver_fini(adev);
|
|
|
|
amdgpu_fbdev_fini(adev);
|
|
|
|
r = amdgpu_fini(adev);
|
2017-06-05 22:11:59 +08:00
|
|
|
if (adev->firmware.gpu_info_fw) {
|
|
|
|
release_firmware(adev->firmware.gpu_info_fw);
|
|
|
|
adev->firmware.gpu_info_fw = NULL;
|
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
adev->accel_working = false;
|
2017-05-25 12:35:25 +08:00
|
|
|
cancel_delayed_work_sync(&adev->late_init_work);
|
2015-04-21 04:55:21 +08:00
|
|
|
/* free i2c buses */
|
|
|
|
amdgpu_i2c_fini(adev);
|
|
|
|
amdgpu_atombios_fini(adev);
|
|
|
|
kfree(adev->bios);
|
|
|
|
adev->bios = NULL;
|
2017-03-11 04:23:45 +08:00
|
|
|
if (!pci_is_thunderbolt_attached(adev->pdev))
|
|
|
|
vga_switcheroo_unregister_client(adev->pdev);
|
2016-06-04 06:21:41 +08:00
|
|
|
if (adev->flags & AMD_IS_PX)
|
|
|
|
vga_switcheroo_fini_domain_pm_ops(adev->dev);
|
2015-04-21 04:55:21 +08:00
|
|
|
vga_client_register(adev->pdev, NULL, NULL, NULL);
|
|
|
|
if (adev->rio_mem)
|
|
|
|
pci_iounmap(adev->pdev, adev->rio_mem);
|
|
|
|
adev->rio_mem = NULL;
|
|
|
|
iounmap(adev->rmmio);
|
|
|
|
adev->rmmio = NULL;
|
2017-06-08 17:15:16 +08:00
|
|
|
amdgpu_doorbell_fini(adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
amdgpu_debugfs_regs_cleanup(adev);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Suspend & resume.
|
|
|
|
*/
|
|
|
|
/**
|
2016-08-24 01:25:49 +08:00
|
|
|
* amdgpu_device_suspend - initiate device suspend
|
2015-04-21 04:55:21 +08:00
|
|
|
*
|
|
|
|
* @dev: drm dev pointer
|
|
|
|
* @state: suspend state
|
|
|
|
*
|
|
|
|
* Puts the hw in the suspend state (all asics).
|
|
|
|
* Returns 0 for success or an error on failure.
|
|
|
|
* Called at driver suspend.
|
|
|
|
*/
|
2016-08-24 01:25:49 +08:00
|
|
|
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
|
2015-04-21 04:55:21 +08:00
|
|
|
{
|
|
|
|
struct amdgpu_device *adev;
|
|
|
|
struct drm_crtc *crtc;
|
|
|
|
struct drm_connector *connector;
|
2015-08-06 00:41:48 +08:00
|
|
|
int r;
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
if (dev == NULL || dev->dev_private == NULL) {
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
adev = dev->dev_private;
|
|
|
|
|
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
drm_kms_helper_poll_disable(dev);
|
|
|
|
|
|
|
|
/* turn off display hw */
|
2015-09-24 02:32:06 +08:00
|
|
|
drm_modeset_lock_all(dev);
|
2015-04-21 04:55:21 +08:00
|
|
|
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
|
|
|
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
|
|
|
|
}
|
2015-09-24 02:32:06 +08:00
|
|
|
drm_modeset_unlock_all(dev);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2015-11-10 06:21:45 +08:00
|
|
|
amdgpu_amdkfd_suspend(adev);
|
|
|
|
|
2015-10-08 12:03:36 +08:00
|
|
|
/* unpin the front buffers and cursors */
|
2015-04-21 04:55:21 +08:00
|
|
|
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
2015-10-08 12:03:36 +08:00
|
|
|
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
2015-04-21 04:55:21 +08:00
|
|
|
struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
|
|
|
|
struct amdgpu_bo *robj;
|
|
|
|
|
2015-10-08 12:03:36 +08:00
|
|
|
if (amdgpu_crtc->cursor_bo) {
|
|
|
|
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
|
2017-04-25 01:52:41 +08:00
|
|
|
r = amdgpu_bo_reserve(aobj, true);
|
2015-10-08 12:03:36 +08:00
|
|
|
if (r == 0) {
|
|
|
|
amdgpu_bo_unpin(aobj);
|
|
|
|
amdgpu_bo_unreserve(aobj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
if (rfb == NULL || rfb->obj == NULL) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
robj = gem_to_amdgpu_bo(rfb->obj);
|
|
|
|
/* don't unpin kernel fb objects */
|
|
|
|
if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
|
2017-04-25 01:52:41 +08:00
|
|
|
r = amdgpu_bo_reserve(robj, true);
|
2015-04-21 04:55:21 +08:00
|
|
|
if (r == 0) {
|
|
|
|
amdgpu_bo_unpin(robj);
|
|
|
|
amdgpu_bo_unreserve(robj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* evict vram memory */
|
|
|
|
amdgpu_bo_evict_vram(adev);
|
|
|
|
|
2015-08-06 00:41:48 +08:00
|
|
|
amdgpu_fence_driver_suspend(adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
r = amdgpu_suspend(adev);
|
|
|
|
|
2016-10-11 00:41:36 +08:00
|
|
|
/* evict remaining vram memory
|
|
|
|
* This second call to evict vram is to evict the gart page table
|
|
|
|
* using the CPU.
|
|
|
|
*/
|
2015-04-21 04:55:21 +08:00
|
|
|
amdgpu_bo_evict_vram(adev);
|
|
|
|
|
2017-07-01 05:08:45 +08:00
|
|
|
amdgpu_atombios_scratch_regs_save(adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
pci_save_state(dev->pdev);
|
|
|
|
if (suspend) {
|
|
|
|
/* Shut down the device */
|
|
|
|
pci_disable_device(dev->pdev);
|
|
|
|
pci_set_power_state(dev->pdev, PCI_D3hot);
|
2016-09-07 17:09:12 +08:00
|
|
|
} else {
|
|
|
|
r = amdgpu_asic_reset(adev);
|
|
|
|
if (r)
|
|
|
|
DRM_ERROR("amdgpu asic reset failed\n");
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (fbcon) {
|
|
|
|
console_lock();
|
|
|
|
amdgpu_fbdev_set_suspend(adev, 1);
|
|
|
|
console_unlock();
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2016-08-24 01:25:49 +08:00
|
|
|
* amdgpu_device_resume - initiate device resume
|
2015-04-21 04:55:21 +08:00
|
|
|
*
|
|
|
|
* @dev: drm dev pointer
|
|
|
|
*
|
|
|
|
* Bring the hw back to operating state (all asics).
|
|
|
|
* Returns 0 for success or an error on failure.
|
|
|
|
* Called at driver resume.
|
|
|
|
*/
|
2016-08-24 01:25:49 +08:00
|
|
|
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
|
2015-04-21 04:55:21 +08:00
|
|
|
{
|
|
|
|
struct drm_connector *connector;
|
|
|
|
struct amdgpu_device *adev = dev->dev_private;
|
2015-10-08 12:03:36 +08:00
|
|
|
struct drm_crtc *crtc;
|
2017-04-13 16:12:26 +08:00
|
|
|
int r = 0;
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
|
|
|
return 0;
|
|
|
|
|
2016-09-07 17:09:12 +08:00
|
|
|
if (fbcon)
|
2015-04-21 04:55:21 +08:00
|
|
|
console_lock();
|
2016-09-07 17:09:12 +08:00
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
if (resume) {
|
|
|
|
pci_set_power_state(dev->pdev, PCI_D0);
|
|
|
|
pci_restore_state(dev->pdev);
|
2016-09-07 17:09:12 +08:00
|
|
|
r = pci_enable_device(dev->pdev);
|
2017-04-13 16:12:26 +08:00
|
|
|
if (r)
|
|
|
|
goto unlock;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
2017-07-01 05:08:45 +08:00
|
|
|
amdgpu_atombios_scratch_regs_restore(adev);
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
/* post card */
|
2017-02-10 15:59:59 +08:00
|
|
|
if (amdgpu_need_post(adev)) {
|
2016-09-07 17:09:12 +08:00
|
|
|
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
|
|
|
|
if (r)
|
|
|
|
DRM_ERROR("amdgpu asic init failed\n");
|
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
r = amdgpu_resume(adev);
|
2017-03-30 13:21:01 +08:00
|
|
|
if (r) {
|
2016-02-04 15:10:08 +08:00
|
|
|
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
|
2017-04-13 16:12:26 +08:00
|
|
|
goto unlock;
|
2017-03-30 13:21:01 +08:00
|
|
|
}
|
2015-08-06 00:41:48 +08:00
|
|
|
amdgpu_fence_driver_resume(adev);
|
|
|
|
|
2016-02-04 15:10:08 +08:00
|
|
|
if (resume) {
|
|
|
|
r = amdgpu_ib_ring_tests(adev);
|
|
|
|
if (r)
|
|
|
|
DRM_ERROR("ib ring test failed (%d).\n", r);
|
|
|
|
}
|
2015-04-21 04:55:21 +08:00
|
|
|
|
|
|
|
r = amdgpu_late_init(adev);
|
2017-04-13 16:12:26 +08:00
|
|
|
if (r)
|
|
|
|
goto unlock;
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2015-10-08 12:03:36 +08:00
|
|
|
/* pin cursors */
|
|
|
|
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
|
|
|
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
|
|
|
|
|
|
|
if (amdgpu_crtc->cursor_bo) {
|
|
|
|
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
|
2017-04-25 01:52:41 +08:00
|
|
|
r = amdgpu_bo_reserve(aobj, true);
|
2015-10-08 12:03:36 +08:00
|
|
|
if (r == 0) {
|
|
|
|
r = amdgpu_bo_pin(aobj,
|
|
|
|
AMDGPU_GEM_DOMAIN_VRAM,
|
|
|
|
&amdgpu_crtc->cursor_addr);
|
|
|
|
if (r != 0)
|
|
|
|
DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
|
|
|
|
amdgpu_bo_unreserve(aobj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-11-10 06:21:45 +08:00
|
|
|
r = amdgpu_amdkfd_resume(adev);
|
|
|
|
if (r)
|
|
|
|
return r;
|
2015-10-08 12:03:36 +08:00
|
|
|
|
2015-04-21 04:55:21 +08:00
|
|
|
/* blat the mode back in */
|
|
|
|
if (fbcon) {
|
|
|
|
drm_helper_resume_force_mode(dev);
|
|
|
|
/* turn on display hw */
|
2015-09-24 02:32:06 +08:00
|
|
|
drm_modeset_lock_all(dev);
|
2015-04-21 04:55:21 +08:00
|
|
|
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
|
|
|
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
|
|
|
|
}
|
2015-09-24 02:32:06 +08:00
|
|
|
drm_modeset_unlock_all(dev);
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
drm_kms_helper_poll_enable(dev);
|
2016-07-18 23:41:37 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Most of the connector probing functions try to acquire runtime pm
|
|
|
|
* refs to ensure that the GPU is powered on when connector polling is
|
|
|
|
* performed. Since we're calling this from a runtime PM callback,
|
|
|
|
* trying to acquire rpm refs will cause us to deadlock.
|
|
|
|
*
|
|
|
|
* Since we're guaranteed to be holding the rpm lock, it's safe to
|
|
|
|
* temporarily disable the rpm helpers so this doesn't deadlock us.
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_PM
|
|
|
|
dev->dev->power.disable_depth++;
|
|
|
|
#endif
|
2015-11-25 03:30:56 +08:00
|
|
|
drm_helper_hpd_irq_event(dev);
|
2016-07-18 23:41:37 +08:00
|
|
|
#ifdef CONFIG_PM
|
|
|
|
dev->dev->power.disable_depth--;
|
|
|
|
#endif
|
2015-04-21 04:55:21 +08:00
|
|
|
|
2017-04-13 16:12:26 +08:00
|
|
|
if (fbcon)
|
2015-04-21 04:55:21 +08:00
|
|
|
amdgpu_fbdev_set_suspend(adev, 0);
|
2017-04-13 16:12:26 +08:00
|
|
|
|
|
|
|
unlock:
|
|
|
|
if (fbcon)
|
2015-04-21 04:55:21 +08:00
|
|
|
console_unlock();
|
|
|
|
|
2017-04-13 16:12:26 +08:00
|
|
|
return r;
|
2015-04-21 04:55:21 +08:00
|
|
|
}
|
|
|
|
|
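/*
 * Soft-reset flow: check_soft_reset marks hung IP blocks, pre_soft_reset
 * prepares them, soft_reset/post_soft_reset recover them, and
 * amdgpu_need_full_reset escalates to a full ASIC reset when a core block
 * (GMC, SMC, ACP, DCE or PSP) is among the hung ones.
 */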
2016-07-15 11:19:20 +08:00
|
|
|
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
bool asic_hang = false;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2016-07-15 11:19:20 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].version->funcs->check_soft_reset)
|
|
|
|
adev->ip_blocks[i].status.hang =
|
|
|
|
adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
|
|
|
|
if (adev->ip_blocks[i].status.hang) {
|
|
|
|
DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
|
2016-07-15 11:19:20 +08:00
|
|
|
asic_hang = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return asic_hang;
|
|
|
|
}
|
|
|
|
|
2016-09-18 22:09:35 +08:00
|
|
|
static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
|
2016-07-18 10:04:34 +08:00
|
|
|
{
|
|
|
|
int i, r = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2016-07-18 10:04:34 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].status.hang &&
|
|
|
|
adev->ip_blocks[i].version->funcs->pre_soft_reset) {
|
|
|
|
r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
|
2016-07-18 10:04:34 +08:00
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-07-15 15:57:13 +08:00
|
|
|
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
|
|
|
|
{
|
2016-10-14 04:07:03 +08:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2016-10-14 04:07:03 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
|
|
|
|
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
|
|
|
|
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
|
2017-09-14 16:25:19 +08:00
|
|
|
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
|
|
|
|
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].status.hang) {
|
2016-10-14 04:07:03 +08:00
|
|
|
DRM_INFO("Some block need full reset!\n");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
2016-07-15 15:57:13 +08:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int amdgpu_soft_reset(struct amdgpu_device *adev)
|
|
|
|
{
|
|
|
|
int i, r = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2016-07-15 15:57:13 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].status.hang &&
|
|
|
|
adev->ip_blocks[i].version->funcs->soft_reset) {
|
|
|
|
r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
|
2016-07-15 15:57:13 +08:00
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
|
|
|
|
{
|
|
|
|
int i, r = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < adev->num_ip_blocks; i++) {
|
2016-10-14 05:41:13 +08:00
|
|
|
if (!adev->ip_blocks[i].status.valid)
|
2016-07-15 15:57:13 +08:00
|
|
|
continue;
|
2016-10-14 05:41:13 +08:00
|
|
|
if (adev->ip_blocks[i].status.hang &&
|
|
|
|
adev->ip_blocks[i].version->funcs->post_soft_reset)
|
|
|
|
r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
|
2016-07-15 15:57:13 +08:00
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0 ? true : false;
}
|
|
|
|
|
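/*
 * amdgpu_recover_vram_from_shadow - restore one buffer from its GTT shadow
 *
 * Used after a reset that lost VRAM contents: if the BO still resides in
 * VRAM, its data is copied back from the shadow copy kept in GTT; the
 * returned fence signals when the copy has completed.
 */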
2016-07-21 17:20:52 +08:00
|
|
|
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_ring *ring,
|
|
|
|
struct amdgpu_bo *bo,
|
2016-10-25 20:00:45 +08:00
|
|
|
struct dma_fence **fence)
|
2016-07-21 17:20:52 +08:00
|
|
|
{
|
|
|
|
uint32_t domain;
|
|
|
|
int r;
|
|
|
|
|
2017-04-21 14:24:26 +08:00
|
|
|
if (!bo->shadow)
|
|
|
|
return 0;
|
|
|
|
|
2017-04-25 01:53:04 +08:00
|
|
|
r = amdgpu_bo_reserve(bo, true);
|
2017-04-21 14:24:26 +08:00
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
|
|
|
|
/* if bo has been evicted, then no need to recover */
|
|
|
|
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
|
2017-04-21 13:08:43 +08:00
|
|
|
r = amdgpu_bo_validate(bo->shadow);
|
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("bo validate failed!\n");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2017-04-21 14:24:26 +08:00
|
|
|
r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
|
2016-07-21 17:20:52 +08:00
|
|
|
NULL, fence, true);
|
2017-04-21 14:24:26 +08:00
|
|
|
if (r) {
|
|
|
|
DRM_ERROR("recover page table failed!\n");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
2016-07-21 17:20:52 +08:00
|
|
|
err:
|
2017-04-21 14:24:26 +08:00
|
|
|
amdgpu_bo_unreserve(bo);
|
|
|
|
return r;
|
2016-07-21 17:20:52 +08:00
|
|
|
}
|
|
|
|
|
2017-01-23 14:22:08 +08:00
|
|
|
/**
|
|
|
|
* amdgpu_sriov_gpu_reset - reset the asic
|
|
|
|
*
|
|
|
|
* @adev: amdgpu device pointer
|
2017-04-26 14:51:54 +08:00
|
|
|
* @job: which job trigger hang
|
2017-01-23 14:22:08 +08:00
|
|
|
*
|
|
|
|
* Attempt the reset the GPU if it has hung (all asics).
|
|
|
|
* for SRIOV case.
|
|
|
|
* Returns 0 for success or an error on failure.
|
|
|
|
*/
|
2017-04-26 14:51:54 +08:00
|
|
|
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
|
2017-01-23 14:22:08 +08:00
|
|
|
{
|
|
|
|
int i, j, r = 0;
|
2017-01-23 14:22:08 +08:00
|
|
|
int resched;
|
|
|
|
struct amdgpu_bo *bo, *tmp;
|
|
|
|
struct amdgpu_ring *ring;
|
|
|
|
struct dma_fence *fence = NULL, *next = NULL;
|
|
|
|
|
2017-01-25 15:48:01 +08:00
|
|
|
mutex_lock(&adev->virt.lock_reset);
|
2017-01-23 14:22:08 +08:00
|
|
|
atomic_inc(&adev->gpu_reset_counter);
|
2017-09-15 18:57:12 +08:00
|
|
|
adev->in_sriov_reset = true;
|
2017-01-23 14:22:08 +08:00
|
|
|
|
|
|
|
/* block TTM */
|
|
|
|
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
|
|
|
|
|
|
|
|
/* we start from the ring trigger GPU hang */
|
|
|
|
j = job ? job->ring->idx : 0;

	/* block scheduler */
	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);

		if (job && j != i)
			continue;

		/* last chance to check whether the job was already removed from
		 * the mirror list, since kthread_park() above may have taken a while */
		if (job && list_empty(&job->base.node)) {
			kthread_unpark(ring->sched.thread);
			goto give_up_reset;
		}

		if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
			amd_sched_job_kickout(&job->base);

		/* only do job_reset on the hang ring if @job not NULL */
		amd_sched_hw_job_reset(&ring->sched);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion_ring(ring);
	}

	/* request to take full control of GPU before re-initialization */
	if (job)
		amdgpu_virt_reset_gpu(adev);
	else
		amdgpu_virt_request_full_gpu(adev, true);

	/* Resume IP prior to SMC */
	amdgpu_sriov_reinit_early(adev);

	/* we need to recover the GART before running SMC/CP/SDMA resume */
	amdgpu_ttm_recover_gart(adev);

	/* now we are okay to resume SMC/CP/SDMA */
	amdgpu_sriov_reinit_late(adev);

	amdgpu_irq_gpu_reset_resume_helper(adev);

	if (amdgpu_ib_ring_tests(adev))
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);
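
	/*
	 * Restore VRAM buffer contents from their shadow copies in GTT.  Each
	 * amdgpu_recover_vram_from_shadow() call queues a copy and returns a
	 * fence; the fence from the previous copy is waited on before moving
	 * to the next BO, and the last fence is waited on after the loop.
	 */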
	DRM_INFO("recover vram bo from shadow\n");

	ring = adev->mman.buffer_funcs_ring;
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r) {
				WARN(r, "recovery from shadow isn't completed\n");
				break;
			}
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait(fence, false);
		if (r)
			WARN(r, "recovery from shadow isn't completed\n");
	}
	dma_fence_put(fence);

	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		if (job && j != i) {
			kthread_unpark(ring->sched.thread);
			continue;
		}

		amd_sched_job_recovery(&ring->sched);
		kthread_unpark(ring->sched.thread);
	}

	drm_helper_resume_force_mode(adev->ddev);
give_up_reset:
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	} else {
		dev_info(adev->dev, "GPU reset succeeded!\n");
	}

	adev->in_sriov_reset = false;
	mutex_unlock(&adev->virt.lock_reset);
	return r;
}

/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset, vram_lost = false;

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_suspend(adev);

retry:
		amdgpu_atombios_scratch_regs_save(adev);
		r = amdgpu_asic_reset(adev);
		amdgpu_atombios_scratch_regs_restore(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);
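
		/*
		 * Two-phase resume (a rough sketch of the flow below): phase 1
		 * brings back the blocks needed to check for VRAM loss and to
		 * recover the GART, phase 2 restores the remaining IP blocks.
		 */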
		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_resume_phase1(adev);
			if (r)
				goto out;
			vram_lost = amdgpu_check_vram_lost(adev);
			if (vram_lost) {
				DRM_ERROR("VRAM is lost!\n");
				atomic_inc(&adev->vram_lost_counter);
			}
			r = amdgpu_ttm_recover_gart(adev);
			if (r)
				goto out;
			r = amdgpu_resume_phase2(adev);
			if (r)
				goto out;
			if (vram_lost)
				amdgpu_fill_reset_magic(adev);
		}
	}
out:
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_suspend(adev);
			need_full_reset = true;
			goto retry;
		}

		/**
		 * recover VM page tables, since we cannot depend on VRAM being
		 * consistent after a full GPU reset.
		 */
		if (need_full_reset && amdgpu_need_backup(adev)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i] && adev->rings[i]->sched.thread) {
				kthread_unpark(adev->rings[i]->sched.thread);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
		dev_info(adev->dev, "GPU reset succeeded!\n");
	}

	amdgpu_vf_error_trans_all(adev);
	return r;
}
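
/*
 * amdgpu_get_pcie_info - fill in the PCIe link speed and width masks used by
 * the power-management code.
 *
 * The masks may be forced via the amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap
 * module parameters.  Devices on a root bus (this covers APUs) fall back to
 * the driver defaults; otherwise the masks are derived from
 * drm_pcie_get_speed_cap_mask() and drm_pcie_get_max_link_width().
 */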
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}

/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
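
/*
 * The register debugfs files encode optional state in the upper bits of the
 * file offset (a sketch of the layout used by the read/write handlers below):
 *   bit  23      take the PM mutex around the access
 *   bits 24..33  SE index    (0x3FF means "all")
 *   bits 34..43  SH index    (0x3FF means "all")
 *   bits 44..53  instance    (0x3FF means "all")
 *   bit  62      apply the SE/SH/instance bank selection above
 * The low 22 bits are the register byte offset within the MMIO BAR.
 */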
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
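
/*
 * amdgpu_debugfs_gca_config_read - dump a snapshot of the GCA configuration
 * (shader engine/CU layout, tiling, fifo sizes, chip and PCI revision data)
 * as an array of dwords.  The first dword is a layout version so consumers
 * can tell which fields are present.
 */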
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
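
/*
 * amdgpu_debugfs_sensor_read - read one of the DPM sensors (temperatures,
 * clocks, voltages and the like).  The file offset selects the sensor index
 * and the value(s) come back through the powerplay read_sensor() callback.
 */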
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
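
/*
 * amdgpu_debugfs_wave_read - dump wave status for a single wavefront.  The
 * file offset packs the dword offset plus the SE/SH/CU/wave/SIMD selectors
 * into bit fields (see the decode below); the data itself comes from the
 * per-asic read_wave_data() callback.
 */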
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	wave = ((*pos >> 31) & 0xFF);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
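
/*
 * amdgpu_debugfs_gpr_read - dump SGPRs or VGPRs for one wave.  The file
 * offset selects the register bank (bit 60: 0 = VGPR, 1 = SGPR) along with
 * the SE/SH/CU/wave/SIMD/thread indices and the starting dword; the values
 * are fetched through the per-asic read_wave_vgprs()/read_wave_sgprs()
 * callbacks.
 */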
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);	/* in dwords */
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
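
/*
 * Create the register-level debugfs files listed in debugfs_regs_names[]
 * under the primary DRM minor; the plain register file additionally reports
 * the MMIO BAR size as its i_size so tools know how much can be read.
 */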
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}
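
/*
 * amdgpu_debugfs_test_ib - run the IB ring tests on demand.  All scheduler
 * threads are parked so nothing else submits while the tests run, and the
 * result is reported through the seq_file.
 */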
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
};

static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_debugfs_test_ib_ring_list, 1);
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
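
/*
 * amdgpu_debugfs_get_vbios_dump - expose the video BIOS image that was read
 * at init time, so it can be fetched from userspace for inspection.
 */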
static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static const struct drm_info_list amdgpu_vbios_dump_list[] = {
	{"amdgpu_vbios",
	 amdgpu_debugfs_get_vbios_dump,
	 0, NULL},
};

static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_vbios_dump_list, 1);
}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif