Merge branch 'drm-next-4.10' of git://people.freedesktop.org/~agd5f/linux into drm-next
First new feature pull for 4.10. Highlights:

- Support for multiple virtual displays in the virtual dce component
- New VM mgr to support non-contiguous vram buffers
- Support for UVD powergating on additional asics
- Power management improvements
- lots of code cleanup and bug fixes

* 'drm-next-4.10' of git://people.freedesktop.org/~agd5f/linux: (107 commits)
  drm/amdgpu: turn on/off uvd clock when dpm enable/disable on CI
  drm/amdgpu: disable dpm before turn off clock when vce idle.
  drm/amdgpu: enable uvd bypass mode for CI/VI.
  drm/amdgpu: just not load smc firmware if smu is already running
  drm/amdgpu: when suspend, set boot state instand of disable dpm.
  drm/amdgpu: use failed label to handle context init failure
  drm/amdgpu: add amdgpu_ttm_bo_eviction_valuable callback
  drm/ttm: make eviction decision a driver callback v2
  drm/ttm: fix coding style in ttm_bo_driver.h
  drm/radeon/pm: autoswitch power state when in balanced mode
  drm/amd/powerplay: fix spelling mistake and add KERN_WARNING to printks
  drm/amdgpu:new ids flag for preempt
  drm/amdgpu: mark symbols static where possible
  drm/amdgpu: change function declarations and add missing header dependencies
  drm/amdgpu: s/amdgpuCrtc/amdgpu_crtc/ in pageflip code
  drm/amdgpu/atom: remove a bunch of unused functions
  drm/amdgpu: consolidate atom scratch reg handling for hangs
  drm/amdgpu: use amdgpu_bo_[create|free]_kernel for wb
  drm/amdgpu: add VCE VM session tracking
  drm/amdgpu: improve parse_cs handling a bit
  ...
commit a1873c6271
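A recurring theme in the diff below is the rework of IP-block handling: the per-device array of struct amdgpu_ip_block_version pointers becomes an array of struct amdgpu_ip_block entries that pair immutable version data with runtime status, and asic setup registers blocks one at a time through amdgpu_ip_block_add(). A minimal sketch of the new registration flow (the surrounding function is hypothetical; only the API comes from this diff):

    /* hypothetical asic init helper using the API added below */
    static int example_detect_ip_blocks(struct amdgpu_device *adev)
    {
    	int r;

    	/* each descriptor carries const type/major/minor/rev plus funcs */
    	r = amdgpu_ip_block_add(adev, &acp_ip_block);
    	if (r)
    		return r;
    	/* ... remaining gmc/ih/gfx/sdma/dce blocks are added the same way */
    	return 0;
    }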
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-	amdgpu_gtt_mgr.o
+	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -90,7 +90,6 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
-#define ENCODER_OBJECT_ID_VIRTUAL 0x28
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
 
@@ -120,7 +119,6 @@
 #define CONNECTOR_OBJECT_ID_eDP 0x14
 #define CONNECTOR_OBJECT_ID_MXM 0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
 
 /* deleted */
 
@@ -149,7 +147,6 @@
 #define GRAPH_OBJECT_ENUM_ID5 0x05
 #define GRAPH_OBJECT_ENUM_ID6 0x06
 #define GRAPH_OBJECT_ENUM_ID7 0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
 
 /****************************************************/
 /* Graphics Object ID Bit definition */
@@ -411,10 +408,6 @@
 					GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
 					ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
 
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-					GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
-					ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
@@ -53,7 +53,11 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
 #include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
 #include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu;
 extern int amdgpu_sclk_deep_sleep_en;
 extern char *amdgpu_virtual_display;
 extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
 #define AMDGPU_MAX_USEC_TIMEOUT		100000	/* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
 #define AMDGPUFB_CONN_LIMIT		4
 #define AMDGPU_BIOS_NUM_SCRATCH		8
 
-/* max number of rings */
-#define AMDGPU_MAX_RINGS		16
-#define AMDGPU_MAX_GFX_RINGS		1
-#define AMDGPU_MAX_COMPUTE_RINGS	8
-#define AMDGPU_MAX_VCE_RINGS		3
-
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES	2
 
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
 
 struct amdgpu_device;
 struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
@@ -198,22 +195,39 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 bool amdgpu_is_idle(struct amdgpu_device *adev,
 		    enum amd_ip_block_type block_type);
 
+#define AMDGPU_MAX_IP_NUM 16
+
+struct amdgpu_ip_block_status {
+	bool valid;
+	bool sw;
+	bool hw;
+	bool late_initialized;
+	bool hang;
+};
+
 struct amdgpu_ip_block_version {
-	enum amd_ip_block_type type;
-	u32 major;
-	u32 minor;
-	u32 rev;
+	const enum amd_ip_block_type type;
+	const u32 major;
+	const u32 minor;
+	const u32 rev;
 	const struct amd_ip_funcs *funcs;
 };
 
+struct amdgpu_ip_block {
+	struct amdgpu_ip_block_status status;
+	const struct amdgpu_ip_block_version *version;
+};
+
 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 				enum amd_ip_block_type type,
 				u32 major, u32 minor);
 
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
-					struct amdgpu_device *adev,
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
 					enum amd_ip_block_type type);
 
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+			const struct amdgpu_ip_block_version *ip_block_version);
+
 /* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
 struct amdgpu_buffer_funcs {
 	/* maximum bytes in a single operation */
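Callers now get the wrapper struct back, so table data and runtime state are read through different members. A hedged usage sketch (block type and version check chosen arbitrarily, not taken from this diff):

    struct amdgpu_ip_block *block =
    	amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

    /* ->version is the const descriptor, ->status is runtime bookkeeping */
    if (block && block->version->major >= 8 && block->status.hw)
    	dev_info(adev->dev, "GFX %u.%u is up\n",
    		 block->version->major, block->version->minor);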
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
 	void (*set_rptr)(struct amdgpu_device *adev);
 };
 
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
-	/* ring read/write ptr handling */
-	u32 (*get_rptr)(struct amdgpu_ring *ring);
-	u32 (*get_wptr)(struct amdgpu_ring *ring);
-	void (*set_wptr)(struct amdgpu_ring *ring);
-	/* validating and patching of IBs */
-	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-	/* command emit functions */
-	void (*emit_ib)(struct amdgpu_ring *ring,
-			struct amdgpu_ib *ib,
-			unsigned vm_id, bool ctx_switch);
-	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
-			   uint64_t seq, unsigned flags);
-	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
-	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
-			      uint64_t pd_addr);
-	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
-	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
-	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
-				uint32_t gds_base, uint32_t gds_size,
-				uint32_t gws_base, uint32_t gws_size,
-				uint32_t oa_base, uint32_t oa_size);
-	/* testing functions */
-	int (*test_ring)(struct amdgpu_ring *ring);
-	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
-	/* insert NOP packets */
-	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
-	/* pad the indirect buffer to the necessary number of dw */
-	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
-	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
-	/* note usage for clock and power gating */
-	void (*begin_use)(struct amdgpu_ring *ring);
-	void (*end_use)(struct amdgpu_ring *ring);
-	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
-	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
-	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
-	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
 /*
  * BIOS.
  */
@@ -363,47 +336,6 @@ struct amdgpu_clock {
 	uint32_t max_pixel_clock;
 };
 
-/*
- * Fences.
- */
-struct amdgpu_fence_driver {
-	uint64_t gpu_addr;
-	volatile uint32_t *cpu_addr;
-	/* sync_seq is protected by ring emission lock */
-	uint32_t sync_seq;
-	atomic_t last_seq;
-	bool initialized;
-	struct amdgpu_irq_src *irq_src;
-	unsigned irq_type;
-	struct timer_list fallback_timer;
-	unsigned num_fences_mask;
-	spinlock_t lock;
-	struct fence **fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
-#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-				  unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
-				   struct amdgpu_irq_src *irq_src,
-				   unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
 /*
  * BO.
  */
@@ -464,7 +396,6 @@ struct amdgpu_bo {
 	 */
 	struct list_head va;
 	/* Constant after initialization */
-	struct amdgpu_device *adev;
 	struct drm_gem_object gem_base;
 	struct amdgpu_bo *parent;
 	struct amdgpu_bo *shadow;
@@ -561,27 +492,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 			  struct drm_device *dev,
 			  uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
-	DECLARE_HASHTABLE(fences, 4);
-	struct fence *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
-		     void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
-				     struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
 
@@ -723,14 +633,6 @@ struct amdgpu_ib {
 	uint32_t flags;
 };
 
-enum amdgpu_ring_type {
-	AMDGPU_RING_TYPE_GFX,
-	AMDGPU_RING_TYPE_COMPUTE,
-	AMDGPU_RING_TYPE_SDMA,
-	AMDGPU_RING_TYPE_UVD,
-	AMDGPU_RING_TYPE_VCE
-};
-
 extern const struct amd_sched_backend_ops amdgpu_sched_ops;
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -744,213 +646,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f);
 
-struct amdgpu_ring {
-	struct amdgpu_device *adev;
-	const struct amdgpu_ring_funcs *funcs;
-	struct amdgpu_fence_driver fence_drv;
-	struct amd_gpu_scheduler sched;
-
-	struct amdgpu_bo *ring_obj;
-	volatile uint32_t *ring;
-	unsigned rptr_offs;
-	unsigned wptr;
-	unsigned wptr_old;
-	unsigned ring_size;
-	unsigned max_dw;
-	int count_dw;
-	uint64_t gpu_addr;
-	uint32_t align_mask;
-	uint32_t ptr_mask;
-	bool ready;
-	u32 nop;
-	u32 idx;
-	u32 me;
-	u32 pipe;
-	u32 queue;
-	struct amdgpu_bo *mqd_obj;
-	u32 doorbell_index;
-	bool use_doorbell;
-	unsigned wptr_offs;
-	unsigned fence_offs;
-	uint64_t current_ctx;
-	enum amdgpu_ring_type type;
-	char name[16];
-	unsigned cond_exe_offs;
-	u64 cond_exe_gpu_addr;
-	volatile u32 *cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
-	struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM	16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID	(1 << 0)
-#define AMDGPU_PTE_SYSTEM	(1 << 1)
-#define AMDGPU_PTE_SNOOPED	(1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE	(1 << 4)
-
-#define AMDGPU_PTE_READABLE	(1 << 5)
-#define AMDGPU_PTE_WRITEABLE	(1 << 6)
-
-#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER	0
-#define AMDGPU_VM_FAULT_STOP_FIRST	1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
-
-struct amdgpu_vm_pt {
-	struct amdgpu_bo_list_entry	entry;
-	uint64_t			addr;
-	uint64_t			shadow_addr;
-};
-
-struct amdgpu_vm {
-	/* tree of virtual addresses mapped */
-	struct rb_root		va;
-
-	/* protecting invalidated */
-	spinlock_t		status_lock;
-
-	/* BOs moved, but not yet updated in the PT */
-	struct list_head	invalidated;
-
-	/* BOs cleared in the PT because of a move */
-	struct list_head	cleared;
-
-	/* BO mappings freed, but not yet updated in the PT */
-	struct list_head	freed;
-
-	/* contains the page directory */
-	struct amdgpu_bo	*page_directory;
-	unsigned		max_pde_used;
-	struct fence		*page_directory_fence;
-	uint64_t		last_eviction_counter;
-
-	/* array of page tables, one for each page directory entry */
-	struct amdgpu_vm_pt	*page_tables;
-
-	/* for id and flush management per ring */
-	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
-
-	/* protecting freed */
-	spinlock_t		freed_lock;
-
-	/* Scheduler entity for page table updates */
-	struct amd_sched_entity	entity;
-
-	/* client id */
-	u64                     client_id;
-};
-
-struct amdgpu_vm_id {
-	struct list_head	list;
-	struct fence		*first;
-	struct amdgpu_sync	active;
-	struct fence		*last_flush;
-	atomic64_t		owner;
-
-	uint64_t		pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
-
-	uint32_t                current_gpu_reset_count;
-
-	uint32_t		gds_base;
-	uint32_t		gds_size;
-	uint32_t		gws_base;
-	uint32_t		gws_size;
-	uint32_t		oa_base;
-	uint32_t		oa_size;
-};
-
-struct amdgpu_vm_manager {
-	/* Handling of VMIDs */
-	struct mutex				lock;
-	unsigned				num_ids;
-	struct list_head			ids_lru;
-	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
-
-	/* Handling of VM fences */
-	u64					fence_context;
-	unsigned				seqno[AMDGPU_MAX_RINGS];
-
-	uint32_t				max_pfn;
-	/* vram base address for page table entry */
-	u64					vram_base_offset;
-	/* is vm enabled? */
-	bool					enabled;
-	/* vm pte handling */
-	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
-	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
-	unsigned				vm_pte_num_rings;
-	atomic_t				vm_pte_next_ring;
-	/* client id counter */
-	atomic64_t				client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-			 struct list_head *validated,
-			 struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-				  struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
-		      struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			     struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-
 /*
  * context related structures
  */
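The ring and VM definitions removed above move into the new amdgpu_ring.h and amdgpu_vm.h headers included earlier in this diff; the PTE flag encoding is unchanged by the move. An illustrative composition of one page-table entry under those defines (example values, not from this diff):

    /* valid, snooped, writeable system page with a 2^4-page fragment hint */
    uint64_t pte = page_addr | AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
    	AMDGPU_PTE_SNOOPED | AMDGPU_PTE_WRITEABLE |
    	AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);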
@@ -1093,6 +788,16 @@ struct amdgpu_scratch {
 /*
  * GFX configurations
  */
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+struct amdgpu_rb_config {
+	uint32_t rb_backend_disable;
+	uint32_t user_rb_backend_disable;
+	uint32_t raster_config;
+	uint32_t raster_config_1;
+};
+
 struct amdgpu_gca_config {
 	unsigned max_shader_engines;
 	unsigned max_tile_pipes;
@@ -1121,6 +826,8 @@
 
 	uint32_t tile_mode_array[32];
 	uint32_t macrotile_mode_array[16];
+
+	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
 };
 
 struct amdgpu_cu_info {
@@ -1133,6 +840,7 @@
 	/* get the gpu clock counter */
 	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
 	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
 };
 
 struct amdgpu_gfx {
@@ -1188,16 +896,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned ring_size, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
-		     enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
 
 /*
  * CS.
@@ -1294,354 +992,6 @@ struct amdgpu_wb {
 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
 
-
-enum amdgpu_int_thermal_type {
-	THERMAL_TYPE_NONE,
-	THERMAL_TYPE_EXTERNAL,
-	THERMAL_TYPE_EXTERNAL_GPIO,
-	THERMAL_TYPE_RV6XX,
-	THERMAL_TYPE_RV770,
-	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
-	THERMAL_TYPE_EVERGREEN,
-	THERMAL_TYPE_SUMO,
-	THERMAL_TYPE_NI,
-	THERMAL_TYPE_SI,
-	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
-	THERMAL_TYPE_CI,
-	THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
-	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
-	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
-	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
-	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
-	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
-	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
-	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
-	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
-	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
-	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
-	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
-	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
-	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
-	u32 caps; /* vbios flags */
-	u32 class; /* vbios flags */
-	u32 class2; /* vbios flags */
-	/* UVD clocks */
-	u32 vclk;
-	u32 dclk;
-	/* VCE clocks */
-	u32 evclk;
-	u32 ecclk;
-	bool vce_active;
-	enum amdgpu_vce_level vce_level;
-	/* asic priv */
-	void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
-	/* thermal interrupt work */
-	struct work_struct work;
-	/* low temperature threshold */
-	int min_temp;
-	/* high temperature threshold */
-	int max_temp;
-	/* was last interrupt low to high or high to low */
-	bool high_to_low;
-	/* interrupt source */
-	struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
-	AMDGPU_SCLK_UP = 1,
-	AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
-	u32 sclk;
-	u32 mclk;
-	enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
-	u32 sclk;
-	u32 mclk;
-	u16 vddc;
-	u16 vddci;
-};
-
-struct amdgpu_clock_array {
-	u32 count;
-	u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
-	u32 clk;
-	u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
-	u32 count;
-	struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
-	struct {
-		u16 vddc;
-		u32 leakage;
-	};
-	struct {
-		u16 vddc1;
-		u16 vddc2;
-		u16 vddc3;
-	};
-};
-
-struct amdgpu_cac_leakage_table {
-	u32 count;
-	union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
-	u16 voltage;
-	u32 sclk;
-	u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
-	u32 count;
-	struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
-	u32 vclk;
-	u32 dclk;
-	u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
-	u8 count;
-	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
-	u32 ecclk;
-	u32 evclk;
-	u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
-	u8 count;
-	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
-	u8 ppm_design;
-	u16 cpu_core_number;
-	u32 platform_tdp;
-	u32 small_ac_platform_tdp;
-	u32 platform_tdc;
-	u32 small_ac_platform_tdc;
-	u32 apu_tdp;
-	u32 dgpu_tdp;
-	u32 dgpu_ulv_power;
-	u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
-	u16 tdp;
-	u16 configurable_tdp;
-	u16 tdc;
-	u16 battery_power_limit;
-	u16 small_power_limit;
-	u16 low_cac_leakage;
-	u16 high_cac_leakage;
-	u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
-	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
-	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
-	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
-	struct amdgpu_clock_array valid_sclk_values;
-	struct amdgpu_clock_array valid_mclk_values;
-	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
-	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
-	u32 mclk_sclk_ratio;
-	u32 sclk_mclk_delta;
-	u16 vddc_vddci_delta;
-	u16 min_vddc_for_pcie_gen2;
-	struct amdgpu_cac_leakage_table cac_leakage_table;
-	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
-	struct amdgpu_ppm_table *ppm_table;
-	struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
-	u16 t_min;
-	u16 t_med;
-	u16 t_high;
-	u16 pwm_min;
-	u16 pwm_med;
-	u16 pwm_high;
-	u8 t_hyst;
-	u32 cycle_delay;
-	u16 t_max;
-	u8 control_mode;
-	u16 default_max_fan_pwm;
-	u16 default_fan_output_sensitivity;
-	u16 fan_output_sensitivity;
-	bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
-	AMDGPU_PCIE_GEN1 = 0,
-	AMDGPU_PCIE_GEN2 = 1,
-	AMDGPU_PCIE_GEN3 = 2,
-	AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
-	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
-	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
-	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
-	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
-	/* vce clocks */
-	u32 evclk;
-	u32 ecclk;
-	/* gpu clocks */
-	u32 sclk;
-	u32 mclk;
-	u8 clk_idx;
-	u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
-	int (*get_temperature)(struct amdgpu_device *adev);
-	int (*pre_set_power_state)(struct amdgpu_device *adev);
-	int (*set_power_state)(struct amdgpu_device *adev);
-	void (*post_set_power_state)(struct amdgpu_device *adev);
-	void (*display_configuration_changed)(struct amdgpu_device *adev);
-	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
-	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
-	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
-	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
-	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
-	bool (*vblank_too_short)(struct amdgpu_device *adev);
-	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
-	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
-	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
-	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
-	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
-	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
-	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
-	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
-	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
-	int (*get_sclk_od)(struct amdgpu_device *adev);
-	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
-	int (*get_mclk_od)(struct amdgpu_device *adev);
-	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
-	struct amdgpu_ps *ps;
-	/* number of valid power states */
-	int num_ps;
-	/* current power state that is active */
-	struct amdgpu_ps *current_ps;
-	/* requested power state */
-	struct amdgpu_ps *requested_ps;
-	/* boot up power state */
-	struct amdgpu_ps *boot_ps;
-	/* default uvd power state */
-	struct amdgpu_ps *uvd_ps;
-	/* vce requirements */
-	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
-	enum amdgpu_vce_level vce_level;
-	enum amd_pm_state_type state;
-	enum amd_pm_state_type user_state;
-	u32 platform_caps;
-	u32 voltage_response_time;
-	u32 backbias_response_time;
-	void *priv;
-	u32 new_active_crtcs;
-	int new_active_crtc_count;
-	u32 current_active_crtcs;
-	int current_active_crtc_count;
-	struct amdgpu_dpm_dynamic_state dyn_state;
-	struct amdgpu_dpm_fan fan;
-	u32 tdp_limit;
-	u32 near_tdp_limit;
-	u32 near_tdp_limit_adjusted;
-	u32 sq_ramping_threshold;
-	u32 cac_leakage;
-	u16 tdp_od_limit;
-	u32 tdp_adjustment;
-	u16 load_line_slope;
-	bool power_control;
-	bool ac_power;
-	/* special states active */
-	bool thermal_active;
-	bool uvd_active;
-	bool vce_active;
-	/* thermal handling */
-	struct amdgpu_dpm_thermal thermal;
-	/* forced levels */
-	enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
-	struct mutex		mutex;
-	u32			current_sclk;
-	u32			current_mclk;
-	u32			default_sclk;
-	u32			default_mclk;
-	struct amdgpu_i2c_chan *i2c_bus;
-	/* internal thermal controller on rv6xx+ */
-	enum amdgpu_int_thermal_type int_thermal_type;
-	struct device	        *int_hwmon_dev;
-	/* fan control parameters */
-	bool                    no_fan;
-	u8                      fan_pulses_per_revolution;
-	u8                      fan_min_rpm;
-	u8                      fan_max_rpm;
-	/* dpm */
-	bool                    dpm_enabled;
-	bool                    sysfs_initialized;
-	struct amdgpu_dpm       dpm;
-	const struct firmware	*fw;	/* SMC firmware */
-	uint32_t                fw_version;
-	const struct amdgpu_dpm_funcs *funcs;
-	uint32_t                pcie_gen_mask;
-	uint32_t                pcie_mlw_mask;
-	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
-void amdgpu_get_pcie_info(struct amdgpu_device *adev);
-
 /*
@@ -1939,14 +1289,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
-struct amdgpu_ip_block_status {
-	bool valid;
-	bool sw;
-	bool hw;
-	bool late_initialized;
-	bool hang;
-};
-
 struct amdgpu_device {
 	struct device *dev;
 	struct drm_device *ddev;
@@ -2102,9 +1444,8 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds gds;
 
-	const struct amdgpu_ip_block_version *ip_blocks;
+	struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
 	int num_ip_blocks;
-	struct amdgpu_ip_block_status *ip_block_status;
 	struct mutex mn_lock;
 	DECLARE_HASHTABLE(mn_hash, 7);
 
@@ -2127,6 +1468,11 @@ struct amdgpu_device {
 
 };
 
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+	return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
 bool amdgpu_device_is_px(struct drm_device *dev);
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       struct drm_device *ddev,
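Since struct amdgpu_bo loses its adev back-pointer earlier in this diff, this inline helper recovers the device from the TTM BO device instead; the amdgpu_cs.c hunks near the end of the diff use it exactly like this:

    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);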
@@ -2278,8 +1624,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2301,108 +1645,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
 
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
-		-EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
-		(adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
-		(adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
-		(adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-		(adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-		(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->get_mclk((adev), (l)))
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
-		(adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
-		(adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
-	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
-	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
-	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
-	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
-	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
-	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
-	(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
-	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
-	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
-	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
-	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
-	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
 #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
 
 /* Common functions */
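The dpm wrapper macros removed here (they move to the new amdgpu_dpm.h included earlier) all share one dispatch shape: route to the powerplay component when pp_enabled is set, otherwise to the legacy pm.funcs table. Expanded by hand for one case, purely for illustration:

    /* what amdgpu_dpm_get_temperature(adev) expands to */
    int temp = adev->pp_enabled ?
    	adev->powerplay.pp_funcs->get_temperature(adev->powerplay.pp_handle) :
    	adev->pm.funcs->get_temperature(adev);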
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	const struct amdgpu_ip_block_version *ip_version =
+	const struct amdgpu_ip_block *ip_block =
 		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
 
-	if (!ip_version)
+	if (!ip_block)
 		return -EINVAL;
 
 	r = amd_acp_hw_init(adev->acp.cgs_device,
-			    ip_version->major, ip_version->minor);
+			    ip_block->version->major, ip_block->version->minor);
 	/* -ENODEV means board uses AZ rather than ACP */
 	if (r == -ENODEV)
 		return 0;
@@ -456,7 +456,7 @@ static int acp_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
 	.name = "acp_ip",
 	.early_init = acp_early_init,
 	.late_init = NULL,
@@ -472,3 +472,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
 	.set_clockgating_state = acp_set_clockgating_state,
 	.set_powergating_state = acp_set_powergating_state,
 };
+
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_ACP,
+	.major = 2,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &acp_ip_funcs,
+};
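With the funcs table now static, the exported symbol for ACP becomes this block descriptor; asic setup elsewhere in the series is then expected to register it by pointer, roughly (call site assumed, not shown in this diff):

    r = amdgpu_ip_block_add(adev, &acp_ip_block);
    if (r)
    	return r;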
@@ -37,6 +37,6 @@ struct amdgpu_acp {
 	struct acp_pm_domain *acp_genpd;
 };
 
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
 
 #endif /* __AMDGPU_ACP_H__ */
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 	return 0;
 }
 
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
-	GET_ENGINE_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-	return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
-	GET_MEMORY_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-	return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-				      uint32_t eng_clock)
-{
-	SET_ENGINE_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
-	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-				      uint32_t mem_clock)
-{
-	SET_MEMORY_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
-	if (adev->flags & AMD_IS_APU)
-		return;
-
-	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 					     u32 eng_clock, u32 mem_clock)
 {
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
 	return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
 }
 
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-				 u16 voltage_level,
-				 u8 voltage_type)
-{
-	union set_voltage args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-	u8 frev, crev, volt_index = voltage_level;
-
-	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	/* 0xff01 is a flag rather then an actual voltage */
-	if (voltage_level == 0xff01)
-		return;
-
-	switch (crev) {
-	case 1:
-		args.v1.ucVoltageType = voltage_type;
-		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
-		args.v1.ucVoltageIndex = volt_index;
-		break;
-	case 2:
-		args.v2.ucVoltageType = voltage_type;
-		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
-		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
-		break;
-	case 3:
-		args.v3.ucVoltageType = voltage_type;
-		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
-		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
-		break;
-	default:
-		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-		return;
-	}
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
 int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
 					      u16 *leakage_id)
 {
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
 		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
 }
 
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+					      bool hung)
+{
+	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+	if (hung)
+		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+	else
+		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+	WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
 /* Atom needs data in little endian format
  * so swap as appropriate when copying data to
  * or from atom. Note that atom operates on
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 					    bool strobe_mode,
 					    struct atom_mpll_param *mpll_param);
 
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-				      uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-				      uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-				 u16 voltage_level,
-				 u8 voltage_type);
-
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 					     u32 eng_clock, u32 mem_clock);
 
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
 void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+					      bool hung);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
 int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 	switch(type) {
 	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
 	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
-		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
 		if (max_offset > adev->mc.real_vram_size)
 			return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 		break;
 	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
 	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
-		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
 		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 			place.fpfn =
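Both CGS VRAM paths now request AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS explicitly because, with the new VRAM manager added in this pull, VRAM buffers are no longer guaranteed to be contiguous by default. A sketch of the same request at the BO level (the amdgpu_bo_create signature here is assumed for this kernel era, not shown in the diff):

    struct amdgpu_bo *bo;
    int r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
    			 AMDGPU_GEM_DOMAIN_VRAM,
    			 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
    			 NULL, NULL, &bo);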
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
 	r = amdgpu_bo_reserve(obj, false);
 	if (unlikely(r != 0))
 		return r;
-	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+	r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
 				     min_offset, max_offset, mcaddr);
 	amdgpu_bo_unreserve(obj);
 	return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 	int i, r = -1;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state(
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
 				(void *)adev,
 				state);
 			break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 	int i, r = -1;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_powergating_state(
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
 				(void *)adev,
 				state);
 			break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 		result = AMDGPU_UCODE_ID_CP_MEC1;
 		break;
 	case CGS_UCODE_ID_CP_MEC_JT2:
-		if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
-		    || adev->asic_type == CHIP_POLARIS10)
-			result = AMDGPU_UCODE_ID_CP_MEC2;
-		else
+		/* for VI. JT2 should be the same as JT1, because:
+		   1, MEC2 and MEC1 use exactly same FW.
+		   2, JT2 is not pached but JT1 is.
+		*/
+		if (adev->asic_type >= CHIP_TOPAZ)
 			result = AMDGPU_UCODE_ID_CP_MEC1;
+		else
+			result = AMDGPU_UCODE_ID_CP_MEC2;
 		break;
 	case CGS_UCODE_ID_RLC_G:
 		result = AMDGPU_UCODE_ID_RLC_G;
 		break;
+	case CGS_UCODE_ID_STORAGE:
+		result = AMDGPU_UCODE_ID_STORAGE;
+		break;
 	default:
 		DRM_ERROR("Firmware type not supported\n");
 	}
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
 		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
 		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
-			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
 			data_size = le32_to_cpu(header->jt_size) << 2;
 		}
-		info->mc_addr = gpu_addr;
+
+		info->kptr = ucode->kaddr;
 		info->image_size = data_size;
+		info->mc_addr = gpu_addr;
 		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+
+		if (CGS_UCODE_ID_CP_MEC == type)
+			info->image_size = (header->jt_offset) << 2;
+
 		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
 		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 	} else {
@@ -851,6 +865,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	return 0;
 }
 
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_sriov_vf(adev);
+}
+
 static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
 					struct cgs_system_info *sys_info)
 {
@@ -1204,6 +1224,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_notify_dpm_enabled,
 	amdgpu_cgs_call_acpi_method,
 	amdgpu_cgs_query_system_info,
+	amdgpu_cgs_is_virtualization_enabled
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
 	.force = amdgpu_connector_dvi_force,
 };
 
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
-	int enc_id = connector->encoder_ids[0];
-	struct drm_encoder *encoder;
-	int i;
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		if (connector->encoder_ids[i] == 0)
-			break;
-
-		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
-		if (!encoder)
-			continue;
-
-		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
-			return encoder;
-	}
-
-	/* pick the first one */
-	if (enc_id)
-		return drm_encoder_find(connector->dev, enc_id);
-	return NULL;
-}
-
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
-	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-
-	if (encoder) {
-		amdgpu_connector_add_common_modes(encoder, connector);
-	}
-
-	return 0;
-}
-
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
-					       struct drm_display_mode *mode)
-{
-	return MODE_OK;
-}
-
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
-	return 0;
-}
-
-static enum drm_connector_status
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
-	return connector_status_connected;
-}
-
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
-				      struct drm_property *property,
-				      uint64_t val)
-{
-	return 0;
-}
-
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
-	return;
-}
-
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
-	.get_modes = amdgpu_connector_virtual_get_modes,
-	.mode_valid = amdgpu_connector_virtual_mode_valid,
-	.best_encoder = amdgpu_connector_virtual_encoder,
-};
-
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
-	.dpms = amdgpu_connector_virtual_dpms,
-	.detect = amdgpu_connector_virtual_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.set_property = amdgpu_connector_virtual_set_property,
-	.destroy = amdgpu_connector_destroy,
-	.force = amdgpu_connector_virtual_force,
-};
-
 void
 amdgpu_connector_add(struct amdgpu_device *adev,
 		     uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 		connector->interlace_allowed = false;
 		connector->doublescan_allowed = false;
 		break;
-	case DRM_MODE_CONNECTOR_VIRTUAL:
-		amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
-		if (!amdgpu_dig_connector)
-			goto failed;
-		amdgpu_connector->con_priv = amdgpu_dig_connector;
-		drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
-		drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
-		subpixel_order = SubPixelHorizontalRGB;
-		connector->interlace_allowed = false;
-		connector->doublescan_allowed = false;
-		break;
 	}
 }
 
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
 static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 				 struct amdgpu_bo *bo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	u64 initial_bytes_moved;
 	uint32_t domain;
 	int r;
@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
|
|||
|
||||
retry:
|
||||
amdgpu_ttm_placement_from_domain(bo, domain);
|
||||
initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
|
||||
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
|
||||
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
|
||||
p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
|
||||
p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
|
||||
initial_bytes_moved;
|
||||
|
||||
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
|
||||
|
@ -387,9 +388,9 @@ retry:
|
|||
|
||||
/* Last resort, try to evict something from the current working set */
|
||||
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
|
||||
struct amdgpu_bo_list_entry *lobj)
|
||||
struct amdgpu_bo *validated)
|
||||
{
|
||||
uint32_t domain = lobj->robj->allowed_domains;
|
||||
uint32_t domain = validated->allowed_domains;
|
||||
int r;
|
||||
|
||||
if (!p->evictable)
|
||||
|
@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
|
|||
|
||||
struct amdgpu_bo_list_entry *candidate = p->evictable;
|
||||
struct amdgpu_bo *bo = candidate->robj;
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
u64 initial_bytes_moved;
|
||||
uint32_t other;
|
||||
|
||||
/* If we reached our current BO we can forget it */
|
||||
if (candidate == lobj)
|
||||
if (candidate->robj == validated)
|
||||
break;
|
||||
|
||||
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
|
||||
|
@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
|
|||
|
||||
/* Good we can try to move this BO somewhere else */
|
||||
amdgpu_ttm_placement_from_domain(bo, other);
|
||||
initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
|
||||
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
|
||||
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
|
||||
p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
|
||||
p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
|
||||
initial_bytes_moved;
|
||||
|
||||
if (unlikely(r))
|
||||
|
@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
|
|||
return false;
|
||||
}
|
||||
|
||||
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
|
||||
{
|
||||
struct amdgpu_cs_parser *p = param;
|
||||
int r;
|
||||
|
||||
do {
|
||||
r = amdgpu_cs_bo_validate(p, bo);
|
||||
} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (bo->shadow)
|
||||
r = amdgpu_cs_bo_validate(p, bo);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
|
||||
struct list_head *validated)
|
||||
{
|
||||
|
@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
|
|||
if (p->evictable == lobj)
|
||||
p->evictable = NULL;
|
||||
|
||||
do {
|
||||
r = amdgpu_cs_bo_validate(p, bo);
|
||||
} while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
|
||||
r = amdgpu_cs_validate(p, bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (bo->shadow) {
|
||||
r = amdgpu_cs_bo_validate(p, bo);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
if (binding_userptr) {
|
||||
drm_free_large(lobj->user_pages);
|
||||
lobj->user_pages = NULL;
|
||||
|
@ -593,14 +604,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
|
|||
list_splice(&need_pages, &p->validated);
|
||||
}
|
||||
|
||||
amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
|
||||
|
||||
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
|
||||
p->bytes_moved = 0;
|
||||
p->evictable = list_last_entry(&p->validated,
|
||||
struct amdgpu_bo_list_entry,
|
||||
tv.head);
|
||||
|
||||
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
|
||||
amdgpu_cs_validate, p);
|
||||
if (r) {
|
||||
DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
|
||||
goto error_validate;
|
||||
}
|
||||
|
||||
r = amdgpu_cs_list_validate(p, &duplicates);
|
||||
if (r) {
|
||||
DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
|
||||
|
@ -806,13 +822,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
|
|||
|
||||
/* Only for UVD/VCE VM emulation */
|
||||
if (ring->funcs->parse_cs) {
|
||||
p->job->vm = NULL;
|
||||
for (i = 0; i < p->job->num_ibs; i++) {
|
||||
r = amdgpu_ring_parse_cs(ring, p, i);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
} else {
|
||||
}
|
||||
|
||||
if (p->job->vm) {
|
||||
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
|
||||
|
||||
r = amdgpu_bo_vm_update_pte(p, vm);
|
||||
|
@ -901,7 +918,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
|
|||
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
|
||||
kptr += chunk_ib->va_start - offset;
|
||||
|
||||
r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
|
||||
r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to get ib !\n");
|
||||
return r;
|
||||
|
@ -916,9 +933,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
|
|||
return r;
|
||||
}
|
||||
|
||||
ib->gpu_addr = chunk_ib->va_start;
|
||||
}
|
||||
|
||||
ib->gpu_addr = chunk_ib->va_start;
|
||||
ib->length_dw = chunk_ib->ib_bytes / 4;
|
||||
ib->flags = chunk_ib->flags;
|
||||
j++;
|
||||
|
@ -926,8 +943,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
|
|||
|
||||
/* UVD & VCE fw doesn't support user fences */
|
||||
if (parser->job->uf_addr && (
|
||||
parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
|
||||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
|
||||
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
|
||||
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
|
@ -1195,6 +1212,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
|
|||
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
|
||||
if (unlikely(r))
|
||||
return r;
|
||||
|
||||
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
|
||||
continue;
|
||||
|
||||
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
|
||||
amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
|
||||
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
|
||||
if (unlikely(r))
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
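Note: the new amdgpu_cs_validate() wrapper above exists so the same validate-then-evict retry loop can serve both the BO list walk and, through its void-pointer first argument, the amdgpu_vm_validate_pt_bos() callback added in the same file. A hedged sketch of the contract it implements, using only names from the hunks above:

        /* Sketch, not the kernel source: retry placement while the only
         * failure is -ENOMEM and the working set still has something to
         * evict; then validate the shadow BO the same way. */
        int r;

        do {
                r = amdgpu_cs_bo_validate(p, bo);
        } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
        if (!r && bo->shadow)
                r = amdgpu_cs_bo_validate(p, bo);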
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -55,10 +55,12 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
         r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
                                   rq, amdgpu_sched_jobs);
         if (r)
-            break;
+            goto failed;
     }
 
-    if (i < adev->num_rings) {
+    return 0;
+
+failed:
     for (j = 0; j < i; j++)
         amd_sched_entity_fini(&adev->rings[j]->sched,
                               &ctx->rings[j].entity);

@@ -66,8 +68,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
     ctx->fences = NULL;
     return r;
-    }
-    return 0;
 }
 
 static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 {
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
     if (adev->vram_scratch.robj == NULL) {
         r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
                              PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                              NULL, NULL, &adev->vram_scratch.robj);
         if (r) {
             return r;

@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
 static void amdgpu_wb_fini(struct amdgpu_device *adev)
 {
     if (adev->wb.wb_obj) {
-        if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
-            amdgpu_bo_kunmap(adev->wb.wb_obj);
-            amdgpu_bo_unpin(adev->wb.wb_obj);
-            amdgpu_bo_unreserve(adev->wb.wb_obj);
-        }
-        amdgpu_bo_unref(&adev->wb.wb_obj);
-        adev->wb.wb = NULL;
+        amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+                              &adev->wb.gpu_addr,
+                              (void **)&adev->wb.wb);
         adev->wb.wb_obj = NULL;
     }
 }

@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
     int r;
 
     if (adev->wb.wb_obj == NULL) {
-        r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
-                             AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
-                             &adev->wb.wb_obj);
+        r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+                                    &adev->wb.wb_obj, &adev->wb.gpu_addr,
+                                    (void **)&adev->wb.wb);
         if (r) {
             dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
             return r;
         }
-        r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
-        if (unlikely(r != 0)) {
-            amdgpu_wb_fini(adev);
-            return r;
-        }
-        r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
-                &adev->wb.gpu_addr);
-        if (r) {
-            amdgpu_bo_unreserve(adev->wb.wb_obj);
-            dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
-            amdgpu_wb_fini(adev);
-            return r;
-        }
-        r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
-        amdgpu_bo_unreserve(adev->wb.wb_obj);
-        if (r) {
-            dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
-            amdgpu_wb_fini(adev);
-            return r;
-        }
 
         adev->wb.num_wb = AMDGPU_MAX_WB;
         memset(&adev->wb.used, 0, sizeof(adev->wb.used));
@@ -1051,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
                  amdgpu_vm_block_size);
         amdgpu_vm_block_size = 9;
     }
+
+    if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) ||
+        !amdgpu_check_pot_argument(amdgpu_vram_page_split)) {
+        dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+                 amdgpu_vram_page_split);
+        amdgpu_vram_page_split = 1024;
+    }
 }
 
 /**

@@ -1125,10 +1110,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
     int i, r = 0;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_blocks[i].type == block_type) {
-            r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+        if (adev->ip_blocks[i].version->type == block_type) {
+            r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
                                                                 state);
             if (r)
                 return r;

@@ -1145,10 +1130,10 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
     int i, r = 0;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_blocks[i].type == block_type) {
-            r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
+        if (adev->ip_blocks[i].version->type == block_type) {
+            r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
                                                                 state);
             if (r)
                 return r;

@@ -1164,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
     int i, r;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_blocks[i].type == block_type) {
-            r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+        if (adev->ip_blocks[i].version->type == block_type) {
+            r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
             if (r)
                 return r;
             break;

@@ -1183,23 +1168,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
     int i;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_blocks[i].type == block_type)
-            return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+        if (adev->ip_blocks[i].version->type == block_type)
+            return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
     }
     return true;
 
 }
 
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
-    struct amdgpu_device *adev,
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
                                              enum amd_ip_block_type type)
 {
     int i;
 
     for (i = 0; i < adev->num_ip_blocks; i++)
-        if (adev->ip_blocks[i].type == type)
+        if (adev->ip_blocks[i].version->type == type)
             return &adev->ip_blocks[i];
 
     return NULL;
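With amdgpu_get_ip_block() now returning the struct amdgpu_ip_block wrapper instead of the bare version table, a caller reaches both the per-device status bits and the versioned function table through one handle. A hedged usage sketch (the GMC block type is only an example, not taken from this diff):

        struct amdgpu_ip_block *ip_block;

        ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
        if (ip_block && ip_block->status.valid)
                DRM_INFO("GMC IP v%u.%u\n", ip_block->version->major,
                         ip_block->version->minor);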
@@ -1220,38 +1204,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
                                 enum amd_ip_block_type type,
                                 u32 major, u32 minor)
 {
-    const struct amdgpu_ip_block_version *ip_block;
-    ip_block = amdgpu_get_ip_block(adev, type);
+    struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
 
-    if (ip_block && ((ip_block->major > major) ||
-            ((ip_block->major == major) &&
-            (ip_block->minor >= minor))))
+    if (ip_block && ((ip_block->version->major > major) ||
+            ((ip_block->version->major == major) &&
+            (ip_block->version->minor >= minor))))
         return 0;
 
     return 1;
 }
 
-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+                        const struct amdgpu_ip_block_version *ip_block_version)
+{
+    if (!ip_block_version)
+        return -EINVAL;
+
+    adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+    return 0;
+}
+
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 {
     adev->enable_virtual_display = false;
 
     if (amdgpu_virtual_display) {
         struct drm_device *ddev = adev->ddev;
         const char *pci_address_name = pci_name(ddev->pdev);
-        char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+        char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
 
         pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
         pciaddstr_tmp = pciaddstr;
-        while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+        while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+            pciaddname = strsep(&pciaddname_tmp, ",");
             if (!strcmp(pci_address_name, pciaddname)) {
+                long num_crtc;
+                int res = -1;
+
                 adev->enable_virtual_display = true;
+
+                if (pciaddname_tmp)
+                    res = kstrtol(pciaddname_tmp, 10,
+                                  &num_crtc);
+
+                if (!res) {
+                    if (num_crtc < 1)
+                        num_crtc = 1;
+                    if (num_crtc > 6)
+                        num_crtc = 6;
+                    adev->mode_info.num_crtc = num_crtc;
+                } else {
+                    adev->mode_info.num_crtc = 1;
+                }
                 break;
             }
         }
 
-        DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+        DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
                  amdgpu_virtual_display, pci_address_name,
-                 adev->enable_virtual_display);
+                 adev->enable_virtual_display, adev->mode_info.num_crtc);
 
         kfree(pciaddstr);
     }
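amdgpu_ip_block_add() replaces the old per-asic static arrays of IP block versions: each asic's setup path now appends its const descriptors in hardware init order. A sketch of how a SoC setup routine might use it; the descriptor name here is illustrative and not defined in this diff:

        /* hypothetical asic wiring; any const struct amdgpu_ip_block_version
         * descriptor (common, GMC, IH, SMC, DCE, GFX, SDMA, ...) would do */
        static int example_set_ip_blocks(struct amdgpu_device *adev)
        {
                int r = amdgpu_ip_block_add(adev, &example_common_ip_block);

                if (r)
                        return r;
                /* ...remaining blocks follow in hw init order... */
                return 0;
        }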
@@ -1261,7 +1282,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 {
     int i, r;
 
-    amdgpu_whether_enable_virtual_display(adev);
+    amdgpu_device_enable_virtual_display(adev);
 
     switch (adev->asic_type) {
     case CHIP_TOPAZ:

@@ -1313,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
         return -EINVAL;
     }
 
-    adev->ip_block_status = kcalloc(adev->num_ip_blocks,
-                    sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
-    if (adev->ip_block_status == NULL)
-        return -ENOMEM;
-
     if (adev->ip_blocks == NULL) {
         DRM_ERROR("No IP blocks found!\n");
         return r;
     }
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
         if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
             DRM_ERROR("disabled ip block: %d\n", i);
-            adev->ip_block_status[i].valid = false;
+            adev->ip_blocks[i].status.valid = false;
         } else {
-            if (adev->ip_blocks[i].funcs->early_init) {
-                r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+            if (adev->ip_blocks[i].version->funcs->early_init) {
+                r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
                 if (r == -ENOENT) {
-                    adev->ip_block_status[i].valid = false;
+                    adev->ip_blocks[i].status.valid = false;
                 } else if (r) {
-                    DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+                    DRM_ERROR("early_init of IP block <%s> failed %d\n",
+                              adev->ip_blocks[i].version->funcs->name, r);
                     return r;
                 } else {
-                    adev->ip_block_status[i].valid = true;
+                    adev->ip_blocks[i].status.valid = true;
                 }
             } else {
-                adev->ip_block_status[i].valid = true;
+                adev->ip_blocks[i].status.valid = true;
             }
         }
     }

@@ -1355,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
     int i, r;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+        r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
         if (r) {
-            DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+            DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+                      adev->ip_blocks[i].version->funcs->name, r);
             return r;
         }
-        adev->ip_block_status[i].sw = true;
+        adev->ip_blocks[i].status.sw = true;
         /* need to do gmc hw init early so we can allocate gpu mem */
-        if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
             r = amdgpu_vram_scratch_init(adev);
             if (r) {
                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
                 return r;
             }
-            r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+            r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
             if (r) {
                 DRM_ERROR("hw_init %d failed %d\n", i, r);
                 return r;

@@ -1380,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
                 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
                 return r;
             }
-            adev->ip_block_status[i].hw = true;
+            adev->ip_blocks[i].status.hw = true;
         }
     }
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].sw)
+        if (!adev->ip_blocks[i].status.sw)
             continue;
         /* gmc hw init is done early */
-        if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
             continue;
-        r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+        r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
         if (r) {
-            DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+            DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+                      adev->ip_blocks[i].version->funcs->name, r);
             return r;
         }
-        adev->ip_block_status[i].hw = true;
+        adev->ip_blocks[i].status.hw = true;
     }
 
     return 0;

@@ -1406,25 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
     int i = 0, r;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_blocks[i].funcs->late_init) {
-            r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+        if (adev->ip_blocks[i].version->funcs->late_init) {
+            r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
             if (r) {
-                DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+                DRM_ERROR("late_init of IP block <%s> failed %d\n",
+                          adev->ip_blocks[i].version->funcs->name, r);
                 return r;
             }
-            adev->ip_block_status[i].late_initialized = true;
+            adev->ip_blocks[i].status.late_initialized = true;
         }
         /* skip CG for VCE/UVD, it's handled specially */
-        if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
-            adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+        if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+            adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
             /* enable clockgating to save power */
-            r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+            r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
                                                                 AMD_CG_STATE_GATE);
             if (r) {
                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
-                          adev->ip_blocks[i].funcs->name, r);
+                          adev->ip_blocks[i].version->funcs->name, r);
                 return r;
             }
         }
@@ -1439,68 +1454,71 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 
     /* need to disable SMC first */
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].hw)
+        if (!adev->ip_blocks[i].status.hw)
             continue;
-        if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
             /* ungate blocks before hw fini so that we can shutdown the blocks safely */
-            r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+            r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
                                                                 AMD_CG_STATE_UNGATE);
             if (r) {
                 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
-                          adev->ip_blocks[i].funcs->name, r);
+                          adev->ip_blocks[i].version->funcs->name, r);
                 return r;
             }
-            r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+            r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
             /* XXX handle errors */
             if (r) {
                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-                          adev->ip_blocks[i].funcs->name, r);
+                          adev->ip_blocks[i].version->funcs->name, r);
             }
-            adev->ip_block_status[i].hw = false;
+            adev->ip_blocks[i].status.hw = false;
             break;
         }
     }
 
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-        if (!adev->ip_block_status[i].hw)
+        if (!adev->ip_blocks[i].status.hw)
             continue;
-        if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
             amdgpu_wb_fini(adev);
             amdgpu_vram_scratch_fini(adev);
         }
         /* ungate blocks before hw fini so that we can shutdown the blocks safely */
-        r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+        r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
                                                             AMD_CG_STATE_UNGATE);
         if (r) {
-            DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+            DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+                      adev->ip_blocks[i].version->funcs->name, r);
             return r;
         }
-        r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+        r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
         /* XXX handle errors */
         if (r) {
-            DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+            DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+                      adev->ip_blocks[i].version->funcs->name, r);
         }
-        adev->ip_block_status[i].hw = false;
+        adev->ip_blocks[i].status.hw = false;
     }
 
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-        if (!adev->ip_block_status[i].sw)
+        if (!adev->ip_blocks[i].status.sw)
             continue;
-        r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+        r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
         /* XXX handle errors */
         if (r) {
-            DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+            DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+                      adev->ip_blocks[i].version->funcs->name, r);
         }
-        adev->ip_block_status[i].sw = false;
-        adev->ip_block_status[i].valid = false;
+        adev->ip_blocks[i].status.sw = false;
+        adev->ip_blocks[i].status.valid = false;
     }
 
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-        if (!adev->ip_block_status[i].late_initialized)
+        if (!adev->ip_blocks[i].status.late_initialized)
             continue;
-        if (adev->ip_blocks[i].funcs->late_fini)
-            adev->ip_blocks[i].funcs->late_fini((void *)adev);
-        adev->ip_block_status[i].late_initialized = false;
+        if (adev->ip_blocks[i].version->funcs->late_fini)
+            adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+        adev->ip_blocks[i].status.late_initialized = false;
     }
 
     return 0;
@@ -1518,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
     }
 
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
         /* ungate blocks so that suspend can properly shut them down */
         if (i != AMD_IP_BLOCK_TYPE_SMC) {
-            r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+            r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
                                                                 AMD_CG_STATE_UNGATE);
             if (r) {
-                DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+                DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+                          adev->ip_blocks[i].version->funcs->name, r);
             }
         }
         /* XXX handle errors */
-        r = adev->ip_blocks[i].funcs->suspend(adev);
+        r = adev->ip_blocks[i].version->funcs->suspend(adev);
         /* XXX handle errors */
         if (r) {
-            DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+            DRM_ERROR("suspend of IP block <%s> failed %d\n",
+                      adev->ip_blocks[i].version->funcs->name, r);
         }
     }
 

@@ -1544,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
     int i, r;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        r = adev->ip_blocks[i].funcs->resume(adev);
+        r = adev->ip_blocks[i].version->funcs->resume(adev);
         if (r) {
-            DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+            DRM_ERROR("resume of IP block <%s> failed %d\n",
+                      adev->ip_blocks[i].version->funcs->name, r);
             return r;
         }
     }

@@ -1859,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
     amdgpu_fence_driver_fini(adev);
     amdgpu_fbdev_fini(adev);
     r = amdgpu_fini(adev);
-    kfree(adev->ip_block_status);
-    adev->ip_block_status = NULL;
     adev->accel_working = false;
     /* free i2c buses */
     amdgpu_i2c_fini(adev);

@@ -1956,7 +1975,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
     r = amdgpu_suspend(adev);
 
-    /* evict remaining vram memory */
+    /* evict remaining vram memory
+     * This second call to evict vram is to evict the gart page table
+     * using the CPU.
+     */
     amdgpu_bo_evict_vram(adev);
 
     pci_save_state(dev->pdev);
@@ -2096,13 +2118,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
     bool asic_hang = false;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_blocks[i].funcs->check_soft_reset)
-            adev->ip_block_status[i].hang =
-                adev->ip_blocks[i].funcs->check_soft_reset(adev);
-        if (adev->ip_block_status[i].hang) {
-            DRM_INFO("IP block:%d is hang!\n", i);
+        if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+            adev->ip_blocks[i].status.hang =
+                adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+        if (adev->ip_blocks[i].status.hang) {
+            DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
             asic_hang = true;
         }
     }

@@ -2114,11 +2136,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
     int i, r = 0;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_block_status[i].hang &&
-            adev->ip_blocks[i].funcs->pre_soft_reset) {
-            r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+        if (adev->ip_blocks[i].status.hang &&
+            adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+            r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
             if (r)
                 return r;
         }

@@ -2132,13 +2154,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
     int i;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
-            (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
-            (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
-            (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
-            if (adev->ip_block_status[i].hang) {
+        if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+            (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+            (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+            (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+            if (adev->ip_blocks[i].status.hang) {
                 DRM_INFO("Some block need full reset!\n");
                 return true;
             }

@@ -2152,11 +2174,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
     int i, r = 0;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_block_status[i].hang &&
-            adev->ip_blocks[i].funcs->soft_reset) {
-            r = adev->ip_blocks[i].funcs->soft_reset(adev);
+        if (adev->ip_blocks[i].status.hang &&
+            adev->ip_blocks[i].version->funcs->soft_reset) {
+            r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
             if (r)
                 return r;
         }

@@ -2170,11 +2192,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
     int i, r = 0;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_status[i].valid)
+        if (!adev->ip_blocks[i].status.valid)
             continue;
-        if (adev->ip_block_status[i].hang &&
-            adev->ip_blocks[i].funcs->post_soft_reset)
-            r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+        if (adev->ip_blocks[i].status.hang &&
+            adev->ip_blocks[i].version->funcs->post_soft_reset)
+            r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
         if (r)
             return r;
     }
@@ -2531,6 +2553,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
         se_bank = (*pos >> 24) & 0x3FF;
         sh_bank = (*pos >> 34) & 0x3FF;
         instance_bank = (*pos >> 44) & 0x3FF;
+
+        if (se_bank == 0x3FF)
+            se_bank = 0xFFFFFFFF;
+        if (sh_bank == 0x3FF)
+            sh_bank = 0xFFFFFFFF;
+        if (instance_bank == 0x3FF)
+            instance_bank = 0xFFFFFFFF;
         use_bank = 1;
     } else {
         use_bank = 0;

@@ -2539,8 +2568,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
     *pos &= 0x3FFFF;
 
     if (use_bank) {
-        if (sh_bank >= adev->gfx.config.max_sh_per_se ||
-            se_bank >= adev->gfx.config.max_shader_engines)
+        if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+            (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
             return -EINVAL;
         mutex_lock(&adev->grbm_idx_mutex);
         amdgpu_gfx_select_se_sh(adev, se_bank,

@@ -2587,10 +2616,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
     struct amdgpu_device *adev = f->f_inode->i_private;
     ssize_t result = 0;
     int r;
+    bool pm_pg_lock, use_bank;
+    unsigned instance_bank, sh_bank, se_bank;
 
     if (size & 0x3 || *pos & 0x3)
         return -EINVAL;
 
+    /* are we reading registers for which a PG lock is necessary? */
+    pm_pg_lock = (*pos >> 23) & 1;
+
+    if (*pos & (1ULL << 62)) {
+        se_bank = (*pos >> 24) & 0x3FF;
+        sh_bank = (*pos >> 34) & 0x3FF;
+        instance_bank = (*pos >> 44) & 0x3FF;
+
+        if (se_bank == 0x3FF)
+            se_bank = 0xFFFFFFFF;
+        if (sh_bank == 0x3FF)
+            sh_bank = 0xFFFFFFFF;
+        if (instance_bank == 0x3FF)
+            instance_bank = 0xFFFFFFFF;
+        use_bank = 1;
+    } else {
+        use_bank = 0;
+    }
+
+    *pos &= 0x3FFFF;
+
+    if (use_bank) {
+        if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+            (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+            return -EINVAL;
+        mutex_lock(&adev->grbm_idx_mutex);
+        amdgpu_gfx_select_se_sh(adev, se_bank,
+                                sh_bank, instance_bank);
+    }
+
+    if (pm_pg_lock)
+        mutex_lock(&adev->pm.mutex);
+
     while (size) {
         uint32_t value;
 

@@ -2609,6 +2673,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
         size -= 4;
     }
 
+    if (use_bank) {
+        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+        mutex_unlock(&adev->grbm_idx_mutex);
+    }
+
+    if (pm_pg_lock)
+        mutex_unlock(&adev->pm.mutex);
+
     return result;
 }
 
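With this change the write path honors the same file-offset encoding the read path uses: bits 0-17 carry the register offset, bit 23 requests the PM mutex, bit 62 enables explicit banking, and three 10-bit fields select SE/SH/instance with 0x3FF meaning broadcast (mapped to 0xFFFFFFFF in the kernel). A hedged userspace-side sketch of composing such an offset, with the field positions taken from the decode above:

        #include <stdint.h>

        /* Build an amdgpu_regs lseek offset; pass 0x3FF in a bank field
         * to broadcast. Field positions mirror the kernel decode above. */
        static uint64_t amdgpu_regs_pos(uint32_t reg, unsigned se, unsigned sh,
                                        unsigned instance, int pm_lock)
        {
                uint64_t pos = reg & 0x3FFFF;              /* register offset */

                pos |= (uint64_t)(pm_lock & 1) << 23;      /* take pm.mutex   */
                pos |= (uint64_t)(se & 0x3FF) << 24;       /* shader engine   */
                pos |= (uint64_t)(sh & 0x3FF) << 34;       /* shader array    */
                pos |= (uint64_t)(instance & 0x3FF) << 44; /* instance        */
                pos |= 1ULL << 62;                         /* enable banking  */
                return pos;
        }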
@@ -2871,6 +2943,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
     return !r ? 4 : r;
 }
 
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+                                        size_t size, loff_t *pos)
+{
+    struct amdgpu_device *adev = f->f_inode->i_private;
+    int r, x;
+    ssize_t result=0;
+    uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+    if (size & 3 || *pos & 3)
+        return -EINVAL;
+
+    /* decode offset */
+    offset = (*pos & 0x7F);
+    se = ((*pos >> 7) & 0xFF);
+    sh = ((*pos >> 15) & 0xFF);
+    cu = ((*pos >> 23) & 0xFF);
+    wave = ((*pos >> 31) & 0xFF);
+    simd = ((*pos >> 37) & 0xFF);
+
+    /* switch to the specific se/sh/cu */
+    mutex_lock(&adev->grbm_idx_mutex);
+    amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+    x = 0;
+    if (adev->gfx.funcs->read_wave_data)
+        adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+    amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+    mutex_unlock(&adev->grbm_idx_mutex);
+
+    if (!x)
+        return -EINVAL;
+
+    while (size && (offset < x * 4)) {
+        uint32_t value;
+
+        value = data[offset >> 2];
+        r = put_user(value, (uint32_t *)buf);
+        if (r)
+            return r;
+
+        result += 4;
+        buf += 4;
+        offset += 4;
+        size -= 4;
+    }
+
+    return result;
+}
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
     .owner = THIS_MODULE,
     .read = amdgpu_debugfs_regs_read,
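The new amdgpu_wave debugfs file packs its selector into the read offset as well: a 7-bit dword offset into the returned wave data, 8-bit SE/SH/CU fields, the wave id at bit 31 and the SIMD id at bit 37, exactly as decoded above. A hedged sketch of building that offset from userspace; in practice the wave id is small, so the nominally overlapping 8-bit fields do not collide:

        #include <stdint.h>

        /* Mirror of the decode in amdgpu_debugfs_wave_read(). */
        static uint64_t amdgpu_wave_pos(unsigned off, unsigned se, unsigned sh,
                                        unsigned cu, unsigned wave, unsigned simd)
        {
                return (uint64_t)(off  & 0x7F)       |
                       (uint64_t)(se   & 0xFF) << 7  |
                       (uint64_t)(sh   & 0xFF) << 15 |
                       (uint64_t)(cu   & 0xFF) << 23 |
                       (uint64_t)(wave & 0xFF) << 31 |
                       (uint64_t)(simd & 0xFF) << 37;
        }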
@@ -2908,6 +3030,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
     .llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+    .owner = THIS_MODULE,
+    .read = amdgpu_debugfs_wave_read,
+    .llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
     &amdgpu_debugfs_regs_fops,
     &amdgpu_debugfs_regs_didt_fops,

@@ -2915,6 +3043,7 @@ static const struct file_operations *debugfs_regs[] = {
     &amdgpu_debugfs_regs_smc_fops,
     &amdgpu_debugfs_gca_config_fops,
     &amdgpu_debugfs_sensors_fops,
+    &amdgpu_debugfs_wave_fops,
 };
 
 static const char *debugfs_regs_names[] = {

@@ -2924,6 +3053,7 @@ static const char *debugfs_regs_names[] = {
     "amdgpu_regs_smc",
     "amdgpu_gca_config",
     "amdgpu_sensors",
+    "amdgpu_wave",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
     struct amdgpu_flip_work *work =
         container_of(delayed_work, struct amdgpu_flip_work, flip_work);
     struct amdgpu_device *adev = work->adev;
-    struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+    struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
 
-    struct drm_crtc *crtc = &amdgpuCrtc->base;
+    struct drm_crtc *crtc = &amdgpu_crtc->base;
     unsigned long flags;
     unsigned i;
     int vpos, hpos;

@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
     /* Wait until we're out of the vertical blank period before the one
      * targeted by the flip
      */
-    if (amdgpuCrtc->enabled &&
+    if (amdgpu_crtc->enabled &&
         (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
                                     &vpos, &hpos, NULL, NULL,
                                     &crtc->hwmode)
          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
         (int)(work->target_vblank -
-              amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+              amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
         schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
         return;
     }

@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
     adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
 
     /* Set the flip status */
-    amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+    amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
     spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
 
     DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
-                     amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+                     amdgpu_crtc->crtc_id, amdgpu_crtc, work);
 
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c

@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
             entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
                 ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
         }
-        for (i = 0; i < states->numEntries; i++) {
-            if (i >= AMDGPU_MAX_VCE_LEVELS)
-                break;
+        adev->pm.dpm.num_of_vce_states =
+                states->numEntries > AMD_MAX_VCE_LEVELS ?
+                AMD_MAX_VCE_LEVELS : states->numEntries;
+        for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
             vce_clk = (VCEClockInfo *)
                 ((u8 *)&array->entries[0] +
                 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));

@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
 
     return encoded_lanes[lanes];
 }
+
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+    if (idx < adev->pm.dpm.num_of_vce_states)
+        return &adev->pm.dpm.vce_states[idx];
+
+    return NULL;
+}
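amdgpu_get_vce_clock_state() bounds-checks the index against the num_of_vce_states count that the table parser above now clamps, so callers can walk the VCE state table until it returns NULL. A hedged usage sketch, assuming the usual evclk/ecclk members of struct amd_vce_state:

        struct amd_vce_state *vce_state;
        unsigned idx;

        for (idx = 0; (vce_state = amdgpu_get_vce_clock_state(adev, idx)); idx++)
                DRM_INFO("VCE state %u: evclk %u, ecclk %u\n",
                         idx, vce_state->evclk, vce_state->ecclk);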
@ -23,6 +23,446 @@
|
|||
#ifndef __AMDGPU_DPM_H__
|
||||
#define __AMDGPU_DPM_H__
|
||||
|
||||
enum amdgpu_int_thermal_type {
|
||||
THERMAL_TYPE_NONE,
|
||||
THERMAL_TYPE_EXTERNAL,
|
||||
THERMAL_TYPE_EXTERNAL_GPIO,
|
||||
THERMAL_TYPE_RV6XX,
|
||||
THERMAL_TYPE_RV770,
|
||||
THERMAL_TYPE_ADT7473_WITH_INTERNAL,
|
||||
THERMAL_TYPE_EVERGREEN,
|
||||
THERMAL_TYPE_SUMO,
|
||||
THERMAL_TYPE_NI,
|
||||
THERMAL_TYPE_SI,
|
||||
THERMAL_TYPE_EMC2103_WITH_INTERNAL,
|
||||
THERMAL_TYPE_CI,
|
||||
THERMAL_TYPE_KV,
|
||||
};
|
||||
|
||||
enum amdgpu_dpm_auto_throttle_src {
|
||||
AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
|
||||
AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
|
||||
};
|
||||
|
||||
enum amdgpu_dpm_event_src {
|
||||
AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
|
||||
AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
|
||||
AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
|
||||
AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
|
||||
AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
|
||||
};
|
||||
|
||||
struct amdgpu_ps {
|
||||
u32 caps; /* vbios flags */
|
||||
u32 class; /* vbios flags */
|
||||
u32 class2; /* vbios flags */
|
||||
/* UVD clocks */
|
||||
u32 vclk;
|
||||
u32 dclk;
|
||||
/* VCE clocks */
|
||||
u32 evclk;
|
||||
u32 ecclk;
|
||||
bool vce_active;
|
||||
enum amd_vce_level vce_level;
|
||||
/* asic priv */
|
||||
void *ps_priv;
|
||||
};
|
||||
|
||||
struct amdgpu_dpm_thermal {
|
||||
/* thermal interrupt work */
|
||||
struct work_struct work;
|
||||
/* low temperature threshold */
|
||||
int min_temp;
|
||||
/* high temperature threshold */
|
||||
int max_temp;
|
||||
/* was last interrupt low to high or high to low */
|
||||
bool high_to_low;
|
||||
/* interrupt source */
|
||||
struct amdgpu_irq_src irq;
|
||||
};
|
||||
|
||||
enum amdgpu_clk_action
|
||||
{
|
||||
AMDGPU_SCLK_UP = 1,
|
||||
AMDGPU_SCLK_DOWN
|
||||
};
|
||||
|
||||
struct amdgpu_blacklist_clocks
|
||||
{
|
||||
u32 sclk;
|
||||
u32 mclk;
|
||||
enum amdgpu_clk_action action;
|
||||
};
|
||||
|
||||
struct amdgpu_clock_and_voltage_limits {
|
||||
u32 sclk;
|
||||
u32 mclk;
|
||||
u16 vddc;
|
||||
u16 vddci;
|
||||
};
|
||||
|
||||
struct amdgpu_clock_array {
|
||||
u32 count;
|
||||
u32 *values;
|
||||
};
|
||||
|
||||
struct amdgpu_clock_voltage_dependency_entry {
|
||||
u32 clk;
|
||||
u16 v;
|
||||
};
|
||||
|
||||
struct amdgpu_clock_voltage_dependency_table {
|
||||
u32 count;
|
||||
struct amdgpu_clock_voltage_dependency_entry *entries;
|
||||
};
|
||||
|
||||
union amdgpu_cac_leakage_entry {
|
||||
struct {
|
||||
u16 vddc;
|
||||
u32 leakage;
|
||||
};
|
||||
struct {
|
||||
u16 vddc1;
|
||||
u16 vddc2;
|
||||
u16 vddc3;
|
||||
};
|
||||
};
|
||||
|
||||
struct amdgpu_cac_leakage_table {
|
||||
u32 count;
|
||||
union amdgpu_cac_leakage_entry *entries;
|
||||
};
|
||||
|
||||
struct amdgpu_phase_shedding_limits_entry {
|
||||
u16 voltage;
|
||||
u32 sclk;
|
||||
u32 mclk;
|
||||
};
|
||||
|
||||
struct amdgpu_phase_shedding_limits_table {
|
||||
u32 count;
|
||||
struct amdgpu_phase_shedding_limits_entry *entries;
|
||||
};
|
||||
|
||||
struct amdgpu_uvd_clock_voltage_dependency_entry {
|
||||
u32 vclk;
|
||||
u32 dclk;
|
||||
u16 v;
|
||||
};
|
||||
|
||||
struct amdgpu_uvd_clock_voltage_dependency_table {
|
||||
u8 count;
|
||||
struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
|
||||
};
|
||||
|
||||
struct amdgpu_vce_clock_voltage_dependency_entry {
|
||||
u32 ecclk;
|
||||
u32 evclk;
|
||||
u16 v;
|
||||
};
|
||||
|
||||
struct amdgpu_vce_clock_voltage_dependency_table {
|
||||
u8 count;
|
||||
struct amdgpu_vce_clock_voltage_dependency_entry *entries;
|
||||
};
|
||||
|
||||
struct amdgpu_ppm_table {
|
||||
u8 ppm_design;
|
||||
u16 cpu_core_number;
|
||||
u32 platform_tdp;
|
||||
u32 small_ac_platform_tdp;
|
||||
u32 platform_tdc;
|
||||
u32 small_ac_platform_tdc;
|
||||
u32 apu_tdp;
|
||||
u32 dgpu_tdp;
|
||||
u32 dgpu_ulv_power;
|
||||
u32 tj_max;
|
||||
};
|
||||
|
||||
struct amdgpu_cac_tdp_table {
|
||||
u16 tdp;
|
||||
u16 configurable_tdp;
|
||||
u16 tdc;
|
||||
u16 battery_power_limit;
|
||||
u16 small_power_limit;
|
||||
u16 low_cac_leakage;
|
||||
u16 high_cac_leakage;
|
||||
u16 maximum_power_delivery_limit;
|
||||
};
|
||||
|
||||
struct amdgpu_dpm_dynamic_state {
|
||||
struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
|
||||
struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
|
||||
struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
|
||||
struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
|
||||
struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
|
||||
struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
|
||||
struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
|
||||
struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
|
||||
struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
|
||||
struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
|
||||
struct amdgpu_clock_array valid_sclk_values;
|
||||
struct amdgpu_clock_array valid_mclk_values;
|
||||
struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
|
||||
struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
|
||||
u32 mclk_sclk_ratio;
|
||||
u32 sclk_mclk_delta;
|
||||
u16 vddc_vddci_delta;
|
||||
u16 min_vddc_for_pcie_gen2;
|
||||
struct amdgpu_cac_leakage_table cac_leakage_table;
|
||||
struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
|
||||
struct amdgpu_ppm_table *ppm_table;
|
||||
struct amdgpu_cac_tdp_table *cac_tdp_table;
|
||||
};
|
||||
|
||||
struct amdgpu_dpm_fan {
|
||||
u16 t_min;
|
||||
u16 t_med;
|
||||
u16 t_high;
|
||||
u16 pwm_min;
|
||||
u16 pwm_med;
|
||||
u16 pwm_high;
|
||||
u8 t_hyst;
|
||||
u32 cycle_delay;
|
||||
u16 t_max;
|
||||
u8 control_mode;
|
||||
u16 default_max_fan_pwm;
|
||||
u16 default_fan_output_sensitivity;
|
||||
u16 fan_output_sensitivity;
|
||||
bool ucode_fan_control;
|
||||
};
|
||||
|
||||
enum amdgpu_pcie_gen {
|
||||
AMDGPU_PCIE_GEN1 = 0,
|
||||
AMDGPU_PCIE_GEN2 = 1,
|
||||
AMDGPU_PCIE_GEN3 = 2,
|
||||
AMDGPU_PCIE_GEN_INVALID = 0xffff
|
||||
};
|
||||
|
||||
enum amdgpu_dpm_forced_level {
|
||||
AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
|
||||
AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
|
||||
AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
|
||||
AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
|
||||
};
|
||||
|
||||
struct amdgpu_dpm_funcs {
|
||||
int (*get_temperature)(struct amdgpu_device *adev);
|
||||
int (*pre_set_power_state)(struct amdgpu_device *adev);
|
||||
int (*set_power_state)(struct amdgpu_device *adev);
|
||||
void (*post_set_power_state)(struct amdgpu_device *adev);
|
||||
void (*display_configuration_changed)(struct amdgpu_device *adev);
|
||||
u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
|
||||
u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
|
||||
void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
|
||||
void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
|
||||
int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
|
||||
bool (*vblank_too_short)(struct amdgpu_device *adev);
|
||||
void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
|
||||
void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
|
||||
void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
|
||||
void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
|
||||
u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
|
||||
int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
|
||||
int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
|
||||
int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
|
||||
int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
|
||||
int (*get_sclk_od)(struct amdgpu_device *adev);
|
||||
int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
|
||||
int (*get_mclk_od)(struct amdgpu_device *adev);
|
||||
int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
|
||||
int (*check_state_equal)(struct amdgpu_device *adev,
|
||||
struct amdgpu_ps *cps,
|
||||
struct amdgpu_ps *rps,
|
||||
bool *equal);
|
||||
|
||||
struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
|
||||
};
|
||||
|
||||
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
|
||||
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
|
||||
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
|
||||
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
|
||||
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
|
||||
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
|
||||
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
|
||||
|
||||
#define amdgpu_dpm_read_sensor(adev, idx, value) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
|
||||
-EINVAL)
|
||||
|
||||
#define amdgpu_dpm_get_temperature(adev) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
|
||||
(adev)->pm.funcs->get_temperature((adev)))
|
||||
|
||||
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
|
||||
(adev)->pm.funcs->set_fan_control_mode((adev), (m)))
|
||||
|
||||
#define amdgpu_dpm_get_fan_control_mode(adev) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
|
||||
(adev)->pm.funcs->get_fan_control_mode((adev)))
|
||||
|
||||
#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
|
||||
(adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
|
||||
|
||||
#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
|
||||
(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
|
||||
|
||||
#define amdgpu_dpm_get_sclk(adev, l) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
|
||||
(adev)->pm.funcs->get_sclk((adev), (l)))
|
||||
|
||||
#define amdgpu_dpm_get_mclk(adev, l) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
|
||||
(adev)->pm.funcs->get_mclk((adev), (l)))
|
||||
|
||||
|
||||
#define amdgpu_dpm_force_performance_level(adev, l) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
|
||||
(adev)->pm.funcs->force_performance_level((adev), (l)))
|
||||
|
||||
#define amdgpu_dpm_powergate_uvd(adev, g) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
|
||||
(adev)->pm.funcs->powergate_uvd((adev), (g)))
|
||||
|
||||
#define amdgpu_dpm_powergate_vce(adev, g) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
|
||||
(adev)->pm.funcs->powergate_vce((adev), (g)))
|
||||
|
||||
#define amdgpu_dpm_get_current_power_state(adev) \
|
||||
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
|
||||
|
||||
#define amdgpu_dpm_get_performance_level(adev) \
|
||||
(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
|
||||
|
||||
#define amdgpu_dpm_get_pp_num_states(adev, data) \
|
||||
(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
|
||||
|
||||
#define amdgpu_dpm_get_pp_table(adev, table) \
|
||||
(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
|
||||
|
||||
#define amdgpu_dpm_set_pp_table(adev, buf, size) \
|
||||
(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
|
||||
|
||||
#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
|
||||
(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
|
||||
|
||||
#define amdgpu_dpm_force_clock_level(adev, type, level) \
|
||||
(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
|
||||
|
||||
#define amdgpu_dpm_get_sclk_od(adev) \
|
||||
(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
|
||||
|
||||
#define amdgpu_dpm_set_sclk_od(adev, value) \
|
||||
(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
|
||||
|
||||
#define amdgpu_dpm_get_mclk_od(adev) \
|
||||
((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
|
||||
|
||||
#define amdgpu_dpm_set_mclk_od(adev, value) \
|
||||
((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
|
||||
|
||||
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
|
||||
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
|
||||
|
||||
#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
|
||||
|
||||
#define amdgpu_dpm_get_vce_clock_state(adev, i) \
|
||||
((adev)->pp_enabled ? \
|
||||
(adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
|
||||
(adev)->pm.funcs->get_vce_clock_state((adev), (i)))
|
||||
|
||||
struct amdgpu_dpm {
	struct amdgpu_ps *ps;
	/* number of valid power states */
	int num_ps;
	/* current power state that is active */
	struct amdgpu_ps *current_ps;
	/* requested power state */
	struct amdgpu_ps *requested_ps;
	/* boot up power state */
	struct amdgpu_ps *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps *uvd_ps;
	/* vce requirements */
	u32 num_of_vce_states;
	struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
	enum amd_vce_level vce_level;
	enum amd_pm_state_type state;
	enum amd_pm_state_type user_state;
	enum amd_pm_state_type last_state;
	enum amd_pm_state_type last_user_state;
	u32 platform_caps;
	u32 voltage_response_time;
	u32 backbias_response_time;
	void *priv;
	u32 new_active_crtcs;
	int new_active_crtc_count;
	u32 current_active_crtcs;
	int current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool thermal_active;
	bool uvd_active;
	bool vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex mutex;
	u32 current_sclk;
	u32 current_mclk;
	u32 default_sclk;
	u32 default_mclk;
	struct amdgpu_i2c_chan *i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device *int_hwmon_dev;
	/* fan control parameters */
	bool no_fan;
	u8 fan_pulses_per_revolution;
	u8 fan_min_rpm;
	u8 fan_max_rpm;
	/* dpm */
	bool dpm_enabled;
	bool sysfs_initialized;
	struct amdgpu_dpm dpm;
	const struct firmware *fw; /* SMC firmware */
	uint32_t fw_version;
	const struct amdgpu_dpm_funcs *funcs;
	uint32_t pcie_gen_mask;
	uint32_t pcie_mlw_mask;
	struct amd_pp_display_configuration pm_display_cfg; /* set by DAL */
};

#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8

@@ -82,4 +522,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
			      u16 default_lanes);
u8 amdgpu_encode_pci_lane_width(u32 lanes);

+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+
#endif

@@ -58,9 +58,10 @@
 * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
 * - 3.7.0 - Add support for VCE clock list packet
 * - 3.8.0 - Add support raster config init in the kernel
+ * - 3.9.0 - Add support for memory query info about VRAM and GTT.
 */
#define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	8
+#define KMS_DRIVER_MINOR	9
#define KMS_DRIVER_PATCHLEVEL	0

int amdgpu_vram_limit = 0;
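The bumped minor version is what userspace keys on before issuing the new memory query; a hedged sketch of a libdrm-based check (illustrative only, assuming the standard drmGetVersion() helper):

	#include <xf86drm.h>

	/* Illustration only: gate use of AMDGPU_INFO_MEMORY on kms 3.9+. */
	static int supports_memory_query(int fd)
	{
		drmVersionPtr ver = drmGetVersion(fd);
		int ok;

		if (!ver)
			return 0;
		ok = (ver->version_major > 3) ||
		     (ver->version_major == 3 && ver->version_minor >= 9);
		drmFreeVersion(ver);
		return ok;
	}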

@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;

@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);

+MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);

@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);

-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+		 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);

static const struct pci_device_id pciidlist[] = {
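The widened format string reflects that each PCI bus id can now carry a crtc count after the comma; e.g. loading with virtual_display=0000:26:00.0,2 (bus id illustrative) would request two virtual crtcs on that device.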

@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	/* fiji */
	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+	{0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
	/* carrizo */
	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},

@@ -152,7 +152,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
				       AMDGPU_GEM_DOMAIN_VRAM,
-				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				       true, &gobj);
	if (ret) {
		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",

@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
	if (adev->gart.robj == NULL) {
		r = amdgpu_bo_create(adev, adev->gart.table_size,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->gart.robj);
		if (r) {
			return r;

@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
 * Call from drm_gem_handle_create which appear in both new and open ioctl
 * case.
 */
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = abo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;

@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

@@ -468,6 +469,16 @@ out:
	return r;
}

+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+	/* if anything is swapped out don't swap it in here,
+	   just abort and wait for the next CS */
+
+	return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
+
/**
 * amdgpu_gem_va_update_vm -update the bo_va in its VM
 *

@@ -478,7 +489,8 @@ out:
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-				    struct amdgpu_bo_va *bo_va, uint32_t operation)
+				    struct amdgpu_bo_va *bo_va,
+				    uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry vm_pd;

@@ -501,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
	if (r)
		goto error_print;

-	amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,

@@ -509,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}
-	list_for_each_entry(entry, &duplicates, head) {
-		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
-		/* if anything is swapped out don't swap it in here,
-		   just abort and wait for the next CS */
-		if (domain == AMDGPU_GEM_DOMAIN_CPU)
-			goto error_unreserve;
-	}
+	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+				      NULL);
+	if (r)
+		goto error_unreserve;

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
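The new helper is used as a visitor over the VM's evicted page-table BOs; a hedged sketch of the calling convention this patch relies on (simplified, using the names introduced above):

	/* Illustration only: visit every page-table BO of a VM and stop
	 * with the callback's error code on the first failure. */
	r = amdgpu_vm_validate_pt_bos(adev, &fpriv->vm,
				      amdgpu_gem_va_check, NULL);
	if (r == -ERESTARTSYS)
		return r;	/* a PT BO was swapped out; retry next CS */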

@@ -536,8 +544,6 @@ error_print:
	DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

-
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{

@@ -547,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
-	struct ttm_validate_buffer tv, tv_pd;
+	struct amdgpu_bo_list_entry vm_pd;
+	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint32_t invalid_flags, va_flags = 0;

@@ -592,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
	tv.shared = true;
	list_add(&tv.head, &list);

-	tv_pd.bo = &fpriv->vm.page_directory->tbo;
-	tv_pd.shared = true;
-	list_add(&tv_pd.head, &list);
+	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r) {

@@ -24,6 +24,7 @@
 */
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_gfx.h"

/*
 * GPU scratch registers helpers function.

@@ -27,6 +27,7 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);

-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+				 unsigned max_sh);

#endif

@@ -168,6 +168,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
		return -ENOMEM;

	node->start = AMDGPU_BO_INVALID_OFFSET;
+	node->size = mem->num_pages;
	mem->mm_node = node;

	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {

@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		return -EINVAL;
	}

-	alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
-		num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+	alloc_size = ring->funcs->emit_frame_size + num_ibs *
+		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {

@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		return r;
	}

-	if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (vm) {

@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (adev->ip_blocks[i].type == type &&
-		    adev->ip_block_status[i].valid) {
-			ip.hw_ip_version_major = adev->ip_blocks[i].major;
-			ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+		if (adev->ip_blocks[i].version->type == type &&
+		    adev->ip_blocks[i].status.valid) {
+			ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+			ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
			ip.capabilities_flags = 0;
			ip.available_rings = ring_mask;
			ip.ib_start_alignment = ib_start_alignment;

@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
-		if (adev->ip_blocks[i].type == type &&
-		    adev->ip_block_status[i].valid &&
+		if (adev->ip_blocks[i].version->type == type &&
+		    adev->ip_blocks[i].status.valid &&
		    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			count++;

@@ -411,6 +411,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
+	case AMDGPU_INFO_MEMORY: {
+		struct drm_amdgpu_memory_info mem;
+
+		memset(&mem, 0, sizeof(mem));
+		mem.vram.total_heap_size = adev->mc.real_vram_size;
+		mem.vram.usable_heap_size =
+			adev->mc.real_vram_size - adev->vram_pin_size;
+		mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+		mem.cpu_accessible_vram.total_heap_size =
+			adev->mc.visible_vram_size;
+		mem.cpu_accessible_vram.usable_heap_size =
+			adev->mc.visible_vram_size -
+			(adev->vram_pin_size - adev->invisible_pin_size);
+		mem.cpu_accessible_vram.heap_usage =
+			atomic64_read(&adev->vram_vis_usage);
+		mem.cpu_accessible_vram.max_allocation =
+			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+		mem.gtt.total_heap_size = adev->mc.gtt_size;
+		mem.gtt.usable_heap_size =
+			adev->mc.gtt_size - adev->gart_pin_size;
+		mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+		return copy_to_user(out, &mem,
+				    min((size_t)size, sizeof(mem)))
+				    ? -EFAULT : 0;
+	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
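Userspace can consume the new query through libdrm's generic info wrapper; a hedged sketch (assuming libdrm_amdgpu's amdgpu_query_info() entry point and the struct layout added above):

	#include <stdio.h>
	#include <amdgpu.h>
	#include <amdgpu_drm.h>

	/* Illustration only: report how much VRAM the heap has in use. */
	static void print_vram_budget(amdgpu_device_handle dev)
	{
		struct drm_amdgpu_memory_info mem = {0};

		if (!amdgpu_query_info(dev, AMDGPU_INFO_MEMORY,
				       sizeof(mem), &mem))
			printf("VRAM: %llu of %llu bytes in use\n",
			       (unsigned long long)mem.vram.heap_usage,
			       (unsigned long long)mem.vram.total_heap_size);
	}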

@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+		if (amdgpu_sriov_vf(adev))
+			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);

@@ -494,6 +526,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
+	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+		unsigned i;
+		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+		struct amd_vce_state *vce_state;
+
+		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+			if (vce_state) {
+				vce_clk_table.entries[i].sclk = vce_state->sclk;
+				vce_clk_table.entries[i].mclk = vce_state->mclk;
+				vce_clk_table.entries[i].eclk = vce_state->evclk;
+				vce_clk_table.num_valid_entries++;
+			}
+		}
+
+		return copy_to_user(out, &vce_clk_table,
+				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
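The matching userspace side mirrors the kernel loop; a hedged sketch (again assuming libdrm's amdgpu_query_info(), variable names illustrative):

	/* Illustration only: list the valid VCE clock states. */
	struct drm_amdgpu_info_vce_clock_table t = {0};
	unsigned i;

	if (!amdgpu_query_info(dev, AMDGPU_INFO_VCE_CLOCK_TABLE,
			       sizeof(t), &t))
		for (i = 0; i < t.num_valid_entries; i++)
			printf("state %u: sclk %u mclk %u eclk %u\n", i,
			       t.entries[i].sclk, t.entries[i].mclk,
			       t.entries[i].eclk);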

@@ -285,7 +285,7 @@ free_rmn:
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;

@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *rmn;
	struct list_head *head;

@@ -341,8 +339,6 @@ struct amdgpu_mode_info {
	int num_dig; /* number of dig blocks */
	int disp_priority;
	const struct amdgpu_display_funcs *funcs;
-	struct hrtimer vblank_timer;
-	enum amdgpu_interrupt_state vsync_timer_enabled;
};

#define AMDGPU_MAX_BL_LEVEL 0xFF

@@ -413,6 +411,9 @@ struct amdgpu_crtc {
	u32 wm_high;
	u32 lb_vblank_lead_lines;
	struct drm_display_mode hw_mode;
+	/* for virtual dce */
+	struct hrtimer vblank_timer;
+	enum amdgpu_interrupt_state vsync_timer_enabled;
};

struct amdgpu_encoder_atom_dig {

@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

-	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
-		mutex_lock(&bo->adev->shadow_list_lock);
+		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
-		mutex_unlock(&bo->adev->shadow_list_lock);
+		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);

@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+		unsigned lpfn = 0;
+
+		/* This forces a reallocation if the flag wasn't set before */
+		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+			lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;

		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
		    !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			places[c].fpfn = visible_pfn;
-			places[c].lpfn = 0;
+			places[c].lpfn = lpfn;
			places[c].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
				TTM_PL_FLAG_TOPDOWN;

@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
		}

		places[c].fpfn = 0;
-		places[c].lpfn = 0;
+		places[c].lpfn = lpfn;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)

@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
-	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
-				  abo->placements, domain, abo->flags);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+				  domain, abo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,

@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, bo_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);

@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
		kfree(bo);
		return r;
	}
-	bo->adev = adev;
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |

@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;
	unsigned fpfn, lpfn;


@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,

		return 0;
	}
+
+	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset >
-		     bo->adev->mc.visible_vram_size)) {
+		     adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
-					 bo->adev->mc.visible_vram_size))
+					 adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
-			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;

@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p pin failed\n", bo);
+		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}
	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p bind failed\n", bo);
+		dev_err(adev->dev, "%p bind failed\n", bo);
		goto error;
	}


@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
	if (gpu_addr != NULL)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-		bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
-		bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:

@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;

	if (!bo->pin_count) {
-		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;

@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-		bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:

@@ -849,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;


@@ -856,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
		return;

	abo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(abo->adev, abo);
+	amdgpu_vm_bo_invalidate(adev, abo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
-	struct amdgpu_device *adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

@@ -879,13 +892,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
-	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
-	if ((offset + size) <= adev->mc.visible_vram_size)
+	/* TODO: figure out how to map scattered VRAM to the CPU */
+	if ((offset + size) <= adev->mc.visible_vram_size &&
+	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
		return 0;

	/* Can't move a pinned BO to visible VRAM */

@@ -893,6 +907,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
+	abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {

@@ -954,6 +969,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}

@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
-			dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;

@@ -986,10 +986,10 @@ restart_search:

static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
-	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
+	bool equal;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)

@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
	else
		return;

-	/* no need to reprogram if nothing changed unless we are on BTC+ */
-	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
-		/* vce just modifies an existing state so force a change */
-		if (ps->vce_active != adev->pm.dpm.vce_active)
-			goto force;
-		if (adev->flags & AMD_IS_APU) {
-			/* for APUs if the num crtcs changed but state is the same,
-			 * all we need to do is update the display configuration.
-			 */
-			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
-				/* update display watermarks based on new power state */
-				amdgpu_display_bandwidth_update(adev);
-				/* update displays */
-				amdgpu_dpm_display_configuration_changed(adev);
-				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-			}
-			return;
-		} else {
-			/* for BTC+ if the num crtcs hasn't changed and state is the same,
-			 * nothing to do, if the num crtcs is > 1 and state is the same,
-			 * update display configuration.
-			 */
-			if (adev->pm.dpm.new_active_crtcs ==
-			    adev->pm.dpm.current_active_crtcs) {
-				return;
-			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
-				   (adev->pm.dpm.new_active_crtc_count > 1)) {
-				/* update display watermarks based on new power state */
-				amdgpu_display_bandwidth_update(adev);
-				/* update displays */
-				amdgpu_dpm_display_configuration_changed(adev);
-				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-				return;
-			}
-		}
-	}
-
force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);

@@ -1059,31 +1019,21 @@ force:
	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

+	amdgpu_dpm_display_configuration_changed(adev);
+
	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

-	/* update display watermarks based on new power state */
-	amdgpu_display_bandwidth_update(adev);
+	if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
+		equal = false;

-	/* wait for the rings to drain */
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (ring && ring->ready)
-			amdgpu_fence_wait_empty(ring);
-	}
+	if (equal)
+		return;

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

-	/* update displays */
-	amdgpu_dpm_display_configuration_changed(adev);
-
	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
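The equality hook lets each asic decide when a reprogram is redundant; a hedged sketch of the callback shape (illustrative only, matching the amdgpu_dpm_funcs slot the macro earlier in this file dispatches to):

	/* Illustration only: report states as equal when they are the same
	 * object, so the caller can skip amdgpu_dpm_set_power_state(). */
	static int example_check_state_equal(struct amdgpu_device *adev,
					     struct amdgpu_ps *cps,
					     struct amdgpu_ps *rps,
					     bool *equal)
	{
		if (!cps || !rps || !equal)
			return -EINVAL;

		*equal = (cps == rps);
		return 0;
	}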

@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.vce_active = true;
		/* XXX select vce level based on ring/task */
-		adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+		adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		mutex_unlock(&adev->pm.mutex);
	} else {
		mutex_lock(&adev->pm.mutex);

@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
+	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

-	if (adev->pp_enabled) {
-		int i = 0;
-
	amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

+	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);

@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
	return ret;
}

-const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
	.name = "amdgpu_powerplay",
	.early_init = amdgpu_pp_early_init,
	.late_init = amdgpu_pp_late_init,

@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
	.set_clockgating_state = amdgpu_pp_set_clockgating_state,
	.set_powergating_state = amdgpu_pp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &amdgpu_pp_ip_funcs,
+};

@@ -23,11 +23,11 @@
 *
 */

-#ifndef __AMDGPU_POPWERPLAY_H__
-#define __AMDGPU_POPWERPLAY_H__
+#ifndef __AMDGPU_POWERPLAY_H__
+#define __AMDGPU_POWERPLAY_H__

#include "amd_shared.h"

-extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
+extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;

-#endif /* __AMDSOC_DM_H__ */
+#endif /* __AMDGPU_POWERPLAY_H__ */

@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
-	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission

@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	int i;

	for (i = 0; i < count; i++)
-		amdgpu_ring_write(ring, ring->nop);
+		amdgpu_ring_write(ring, ring->funcs->nop);
}

/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets

@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
-	while (ib->length_dw & ring->align_mask)
-		ib->ptr[ib->length_dw++] = ring->nop;
+	while (ib->length_dw & ring->funcs->align_mask)
+		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

/**

@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
	uint32_t count;

	/* We pad to match fetch size */
-	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
-	count %= ring->align_mask + 1;
+	count = ring->funcs->align_mask + 1 -
+		(ring->wptr & ring->funcs->align_mask);
+	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();

@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned max_dw, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
-		     enum amdgpu_ring_type ring_type)
+		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
+		     unsigned irq_type)
{
	int r;

@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,

	ring->ring_size = roundup_pow_of_two(max_dw * 4 *
					     amdgpu_sched_hw_submission);
-	ring->align_mask = align_mask;
-	ring->nop = nop;
-	ring->type = ring_type;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
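With nop, align_mask, and the ring type folded into amdgpu_ring_funcs, per-ring constants are now declared once next to the callbacks. A hedged sketch of what an IP block's table might look like under the new scheme (field values illustrative, not from this patch; most callbacks omitted):

	/* Illustration only: constants and callbacks live together now. */
	static const struct amdgpu_ring_funcs example_gfx_ring_funcs = {
		.type = AMDGPU_RING_TYPE_GFX,
		.align_mask = 0xff,	/* pad submissions to the fetch size */
		.nop = PACKET3(PACKET3_NOP, 0x3FFF),
		.insert_nop = amdgpu_ring_insert_nop,
		.pad_ib = amdgpu_ring_generic_pad_ib,
		/* rptr/wptr handling and emit callbacks omitted for brevity */
	};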

@@ -0,0 +1,185 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_RING_H__
+#define __AMDGPU_RING_H__
+
+#include "gpu_scheduler.h"
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS		16
+#define AMDGPU_MAX_GFX_RINGS		1
+#define AMDGPU_MAX_COMPUTE_RINGS	8
+#define AMDGPU_MAX_VCE_RINGS		3
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
+#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
+
+enum amdgpu_ring_type {
+	AMDGPU_RING_TYPE_GFX,
+	AMDGPU_RING_TYPE_COMPUTE,
+	AMDGPU_RING_TYPE_SDMA,
+	AMDGPU_RING_TYPE_UVD,
+	AMDGPU_RING_TYPE_VCE
+};
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+	uint64_t gpu_addr;
+	volatile uint32_t *cpu_addr;
+	/* sync_seq is protected by ring emission lock */
+	uint32_t sync_seq;
+	atomic_t last_seq;
+	bool initialized;
+	struct amdgpu_irq_src *irq_src;
+	unsigned irq_type;
+	struct timer_list fallback_timer;
+	unsigned num_fences_mask;
+	spinlock_t lock;
+	struct fence **fences;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+				  unsigned num_hw_submission);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+				   struct amdgpu_irq_src *irq_src,
+				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+/*
+ * Rings.
+ */
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+	enum amdgpu_ring_type type;
+	uint32_t align_mask;
+	u32 nop;
+
+	/* ring read/write ptr handling */
+	u32 (*get_rptr)(struct amdgpu_ring *ring);
+	u32 (*get_wptr)(struct amdgpu_ring *ring);
+	void (*set_wptr)(struct amdgpu_ring *ring);
+	/* validating and patching of IBs */
+	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+	/* constants to calculate how many DW are needed for an emit */
+	unsigned emit_frame_size;
+	unsigned emit_ib_size;
+	/* command emit functions */
+	void (*emit_ib)(struct amdgpu_ring *ring,
+			struct amdgpu_ib *ib,
+			unsigned vm_id, bool ctx_switch);
+	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+			   uint64_t seq, unsigned flags);
+	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+			      uint64_t pd_addr);
+	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+				uint32_t gds_base, uint32_t gds_size,
+				uint32_t gws_base, uint32_t gws_size,
+				uint32_t oa_base, uint32_t oa_size);
+	/* testing functions */
+	int (*test_ring)(struct amdgpu_ring *ring);
+	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
+	/* insert NOP packets */
+	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+	/* pad the indirect buffer to the necessary number of dw */
+	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+	/* note usage for clock and power gating */
+	void (*begin_use)(struct amdgpu_ring *ring);
+	void (*end_use)(struct amdgpu_ring *ring);
+	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+};
+
+struct amdgpu_ring {
+	struct amdgpu_device *adev;
+	const struct amdgpu_ring_funcs *funcs;
+	struct amdgpu_fence_driver fence_drv;
+	struct amd_gpu_scheduler sched;
+
+	struct amdgpu_bo *ring_obj;
+	volatile uint32_t *ring;
+	unsigned rptr_offs;
+	unsigned wptr;
+	unsigned wptr_old;
+	unsigned ring_size;
+	unsigned max_dw;
+	int count_dw;
+	uint64_t gpu_addr;
+	uint32_t ptr_mask;
+	bool ready;
+	u32 idx;
+	u32 me;
+	u32 pipe;
+	u32 queue;
+	struct amdgpu_bo *mqd_obj;
+	u32 doorbell_index;
+	bool use_doorbell;
+	unsigned wptr_offs;
+	unsigned fence_offs;
+	uint64_t current_ctx;
+	char name[16];
+	unsigned cond_exe_offs;
+	u64 cond_exe_gpu_addr;
+	volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry *ent;
+#endif
+};
+
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
+		     unsigned irq_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+#endif
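Callers of amdgpu_ring_init() now pass only the size and interrupt wiring; a hedged before/after sketch of a ring bring-up call under the reworked signature (ring size, irq source, and packet constants illustrative):

	/* old: per-ring constants passed on every call */
	r = amdgpu_ring_init(adev, ring, 1024,
			     PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
			     &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_TYPE_GFX);

	/* new: the constants come from ring->funcs instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);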

@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_SYNC_H__
+#define __AMDGPU_SYNC_H__
+
+#include <linux/hashtable.h>
+
+struct fence;
+struct reservation_object;
+struct amdgpu_device;
+struct amdgpu_ring;
+
+/*
+ * Container for fences used to sync command submissions.
+ */
+struct amdgpu_sync {
+	DECLARE_HASHTABLE(fences, 4);
+	struct fence *last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+		     struct amdgpu_sync *sync,
+		     struct reservation_object *resv,
+		     void *owner);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+				     struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
+
+#endif
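A hedged sketch of the intended call pattern for the container (simplified, error handling reduced to the essentials; the bo variable is assumed to be a reserved amdgpu_bo):

	/* Illustration only: gather the fences a job must wait for,
	 * then drain them one by one. */
	struct amdgpu_sync sync;
	struct fence *f;
	int r;

	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED);
	if (!r)
		while ((f = amdgpu_sync_get_fence(&sync))) {
			fence_wait(f, false);
			fence_put(f);
		}
	amdgpu_sync_free(&sync);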

@@ -51,16 +51,6 @@
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
-	struct amdgpu_mman *mman;
-	struct amdgpu_device *adev;
-
-	mman = container_of(bdev, struct amdgpu_mman, bdev);
-	adev = container_of(mman, struct amdgpu_device, mman);
-	return adev;
-}
-

/*
 * Global memory.

@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
{
	struct amdgpu_device *adev;

-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:

@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
-		man->func = &ttm_bo_manager_func;
+		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;

@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static struct ttm_place placements = {
		.fpfn = 0,

@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
	abo = container_of(bo, struct amdgpu_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
-		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+		if (adev->mman.buffer_funcs_ring->ready == false) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);

@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				 * allocating address space for the BO.
				 */
				abo->placements[i].lpfn =
-					abo->adev->mc.gtt_size >> PAGE_SHIFT;
+					adev->mc.gtt_size >> PAGE_SHIFT;
			}
		}
		break;

@@ -260,64 +251,116 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
	new_mem->mm_node = NULL;
}

+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+			       struct drm_mm_node *mm_node,
+			       struct ttm_mem_reg *mem,
+			       uint64_t *addr)
+{
+	int r;
+
+	switch (mem->mem_type) {
+	case TTM_PL_TT:
+		r = amdgpu_ttm_bind(bo, mem);
+		if (r)
+			return r;
+
+	case TTM_PL_VRAM:
+		*addr = mm_node->start << PAGE_SHIFT;
+		*addr += bo->bdev->man[mem->mem_type].gpu_offset;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", mem->mem_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
-	struct amdgpu_device *adev;
-	struct amdgpu_ring *ring;
-	uint64_t old_start, new_start;
-	struct fence *fence;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+	struct drm_mm_node *old_mm, *new_mm;
+	uint64_t old_start, old_size, new_start, new_size;
+	unsigned long num_pages;
+	struct fence *fence = NULL;
	int r;

-	adev = amdgpu_get_adev(bo->bdev);
-	ring = adev->mman.buffer_funcs_ring;
+	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);

-	switch (old_mem->mem_type) {
-	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, old_mem);
-		if (r)
-			return r;
-
-	case TTM_PL_VRAM:
-		old_start = (u64)old_mem->start << PAGE_SHIFT;
-		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-		return -EINVAL;
-	}
-	switch (new_mem->mem_type) {
-	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, new_mem);
-		if (r)
-			return r;
-
-	case TTM_PL_VRAM:
-		new_start = (u64)new_mem->start << PAGE_SHIFT;
-		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-		return -EINVAL;
-	}
	if (!ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

-	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
-
-	r = amdgpu_copy_buffer(ring, old_start, new_start,
-			       new_mem->num_pages * PAGE_SIZE, /* bytes */
-			       bo->resv, &fence, false);
+	old_mm = old_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+	if (r)
+		return r;
+	old_size = old_mm->size;
+
+	new_mm = new_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
+	if (r)
+		return r;
+	new_size = new_mm->size;
+
+	num_pages = new_mem->num_pages;
+	while (num_pages) {
+		unsigned long cur_pages = min(old_size, new_size);
+		struct fence *next;
+
+		r = amdgpu_copy_buffer(ring, old_start, new_start,
+				       cur_pages * PAGE_SIZE,
+				       bo->resv, &next, false);
+		if (r)
+			goto error;
+
+		fence_put(fence);
+		fence = next;
+
+		num_pages -= cur_pages;
+		if (!num_pages)
+			break;
+
+		old_size -= cur_pages;
+		if (!old_size) {
+			r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+						&old_start);
+			if (r)
+				goto error;
+			old_size = old_mm->size;
+		} else {
+			old_start += cur_pages * PAGE_SIZE;
+		}
+
+		new_size -= cur_pages;
+		if (!new_size) {
+			r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+						&new_start);
+			if (r)
+				goto error;
+			new_size = new_mm->size;
+		} else {
+			new_start += cur_pages * PAGE_SIZE;
+		}
+	}

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	fence_put(fence);
	return r;
+
+error:
+	if (fence)
+		fence_wait(fence, false);
+	fence_put(fence);
+	return r;
}

static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
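The loop always copies min(old_size, new_size) pages and then advances whichever node ran out; a hedged trace of the window arithmetic (node sizes made up for illustration):

	/* Illustration only: old nodes of 4+2 pages copied into new nodes
	 * of 3+3 pages proceeds as three sub-copies:
	 *   step 1: cur_pages = min(4,3) = 3 -> old has 1 left, new advances
	 *   step 2: cur_pages = min(1,3) = 1 -> old advances, new has 2 left
	 *   step 3: cur_pages = min(2,2) = 2 -> num_pages reaches 0, done
	 * Each step issues one amdgpu_copy_buffer() and chains its fence. */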

@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
	struct ttm_placement placement;
	int r;

-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;

@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
	struct ttm_place placements;
	int r;

-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;

@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);

	/* remember the eviction */
	if (evict)

@@ -475,7 +518,7 @@ memcpy:
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;

@@ -607,7 +650,7 @@ release_pages:
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

@@ -639,7 +682,7 @@ release_sg:

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {

@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
		return 0;
	}

-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {

@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
	if (slave)
		return;

-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {

@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,

static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	unsigned i, j;

	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {

@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)

static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	unsigned log2_size = min(ilog2(tbo->num_pages),
				 AMDGPU_TTM_LRU_SIZE - 1);

@@ -1060,12 +1103,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
	return res;
}

+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+					    const struct ttm_place *place)
+{
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
+		unsigned long num_pages = bo->mem.num_pages;
+		struct drm_mm_node *node = bo->mem.mm_node;
+
+		/* Check each drm MM node individually */
+		while (num_pages) {
+			if (place->fpfn < (node->start + node->size) &&
+			    !(place->lpfn && place->lpfn <= node->start))
+				return true;
+
+			num_pages -= node->size;
+			++node;
+		}
+
+		return false;
+	}
+
+	return ttm_bo_eviction_valuable(bo, place);
+}
+
static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
+	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
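Whether an eviction helps depends on whether any node of the split BO actually intersects the range being freed; a hedged restatement of the overlap test used above:

	/* Illustration only: a node covering [start, start + size) overlaps
	 * the requested window [fpfn, lpfn) exactly when
	 *   fpfn < start + size  &&  (lpfn == 0 || start < lpfn)
	 * which is the per-node condition the while loop checks. */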
@ -1119,7 +1187,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
|
|||
|
||||
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, &adev->stollen_vga_memory);
|
||||
if (r) {
|
||||
return r;
|
||||
|
@@ -1317,7 +1386,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       struct reservation_object *resv,
 		       struct fence **fence)
 {
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_job *job;
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
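Note: the new eviction_valuable hook above lets the driver skip evicting split VRAM buffers whose eviction could not actually free space in the range a placement request needs. A minimal sketch of how an eviction loop might consult the callback; this only illustrates the callback contract, it is not the real ttm_mem_evict_first() code, and the loop shape here is an assumption:

/* Illustration only: walk an LRU and pick the first BO whose eviction
 * the driver considers useful for the requested placement. */
static struct ttm_buffer_object *
pick_victim(struct ttm_bo_device *bdev, struct ttm_mem_type_manager *man,
	    const struct ttm_place *place)
{
	struct ttm_buffer_object *bo;

	list_for_each_entry(bo, &man->lru, lru) {
		/* driver veto: evicting this BO must be able to free
		 * pages inside [place->fpfn, place->lpfn) */
		if (bdev->driver->eviction_valuable(bo, place))
			return bo;
	}
	return NULL;
}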
@@ -66,6 +66,7 @@ struct amdgpu_mman {
 };
 
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
 int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *tbo,
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
 	ucode->mc_addr = mc_addr;
 	ucode->kaddr = kptr;
 
+	if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
+		return 0;
+
 	header = (const struct common_firmware_header *)ucode->fw->data;
 	memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
 		le32_to_cpu(header->ucode_array_offset_bytes)),
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
 	return 0;
 }
 
+static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+				uint64_t mc_addr, void *kptr)
+{
+	const struct gfx_firmware_header_v1_0 *header = NULL;
+	const struct common_firmware_header *comm_hdr = NULL;
+	uint8_t* src_addr = NULL;
+	uint8_t* dst_addr = NULL;
+
+	if (NULL == ucode->fw)
+		return 0;
+
+	comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
+	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+	dst_addr = ucode->kaddr +
+		   ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
+			 PAGE_SIZE);
+	src_addr = (uint8_t *)ucode->fw->data +
+		   le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
+		   (le32_to_cpu(header->jt_offset) * 4);
+	memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
+
+	return 0;
+}
+
+
 int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 {
 	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 	const struct common_firmware_header *header = NULL;
 
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-				AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+				amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				0, NULL, NULL, bo);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
 		goto failed;
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 		goto failed_reserve;
 	}
 
-	err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+	err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				&fw_mc_addr);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
 		goto failed_pin;
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 			header = (const struct common_firmware_header *)ucode->fw->data;
 			amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
 						    fw_buf_ptr + fw_offset);
+			if (i == AMDGPU_UCODE_ID_CP_MEC1) {
+				const struct gfx_firmware_header_v1_0 *cp_hdr;
+				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+				amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
+						    fw_buf_ptr + fw_offset);
+				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+			}
 			fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 		}
 	}
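Note: for the MEC1 case above, the jump table is copied to a page-aligned slot directly behind the ucode image and the firmware offset is advanced past both. A standalone arithmetic check of that layout, using made-up illustration values for the sizes:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	const unsigned page = 4096;
	unsigned ucode_size_bytes = 150000;	/* hypothetical fw size */
	unsigned jt_size_dw = 256;		/* hypothetical header->jt_size */

	unsigned jt_dst = ALIGN_UP(ucode_size_bytes, page);	/* dst_addr slot */
	unsigned jt_bump = ALIGN_UP(jt_size_dw << 2, page);	/* fw_offset bump */

	printf("jt copy at +%u bytes, fw_offset advances by %u\n",
	       jt_dst, jt_bump);
	return 0;
}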
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_CP_MEC1,
 	AMDGPU_UCODE_ID_CP_MEC2,
 	AMDGPU_UCODE_ID_RLC_G,
+	AMDGPU_UCODE_ID_STORAGE,
 	AMDGPU_UCODE_ID_MAXIMUM,
 };
 
@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
 	int r;
 
+	parser->job->vm = NULL;
+	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
 	if (ib->length_dw % 16) {
 		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
 			  ib->length_dw);
@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		return r;
 
-	if (!bo->adev->uvd.address_64_bit) {
+	if (!ring->adev->uvd.address_64_bit) {
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
 	}
@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &bo);
 	if (r)
 		return r;
@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &bo);
 	if (r)
 		return r;
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &adev->vce.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	uint32_t *size = &tmp;
 	int i, r, idx = 0;
 
+	p->job->vm = NULL;
+	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
 	r = amdgpu_cs_sysvm_access_required(p);
 	if (r)
 		return r;
@@ -787,6 +791,96 @@ out:
 	return r;
 }
 
+/**
+ * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode
+ *
+ * @p: parser context
+ *
+ */
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+	int session_idx = -1;
+	uint32_t destroyed = 0;
+	uint32_t created = 0;
+	uint32_t allocated = 0;
+	uint32_t tmp, handle = 0;
+	int i, r = 0, idx = 0;
+
+	while (idx < ib->length_dw) {
+		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+		if ((len < 8) || (len & 3)) {
+			DRM_ERROR("invalid VCE command length (%d)!\n", len);
+			r = -EINVAL;
+			goto out;
+		}
+
+		switch (cmd) {
+		case 0x00000001: /* session */
+			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+			session_idx = amdgpu_vce_validate_handle(p, handle,
+								 &allocated);
+			if (session_idx < 0) {
+				r = session_idx;
+				goto out;
+			}
+			break;
+
+		case 0x01000001: /* create */
+			created |= 1 << session_idx;
+			if (destroyed & (1 << session_idx)) {
+				destroyed &= ~(1 << session_idx);
+				allocated |= 1 << session_idx;
+
+			} else if (!(allocated & (1 << session_idx))) {
+				DRM_ERROR("Handle already in use!\n");
+				r = -EINVAL;
+				goto out;
+			}
+
+			break;
+
+		case 0x02000001: /* destroy */
+			destroyed |= 1 << session_idx;
+			break;
+
+		default:
+			break;
+		}
+
+		if (session_idx == -1) {
+			DRM_ERROR("no session command at start of IB\n");
+			r = -EINVAL;
+			goto out;
+		}
+
+		idx += len / 4;
+	}
+
+	if (allocated & ~created) {
+		DRM_ERROR("New session without create command!\n");
+		r = -ENOENT;
+	}
+
+out:
+	if (!r) {
+		/* No error, free all destroyed handle slots */
+		tmp = destroyed;
+		amdgpu_ib_free(p->adev, ib, NULL);
+	} else {
+		/* Error during parsing, free all allocated handle slots */
+		tmp = allocated;
+	}
+
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+		if (tmp & (1 << i))
+			atomic_set(&p->adev->vce.handles[i], 0);
+
+	return r;
+}
+
 /**
  * amdgpu_vce_ring_emit_ib - execute indirect buffer
  *
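Note: the parser above tracks session lifetime purely in per-slot bitmasks and only decides at the out: label which slots to release. A tiny self-contained illustration of that cleanup rule; the slot number and the handle count of 16 are assumptions for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t allocated = 0, destroyed = 0;
	int session_idx = 3;			/* hypothetical slot */

	allocated |= 1u << session_idx;		/* handle validated */
	destroyed |= 1u << session_idx;		/* destroy cmd seen */

	int r = 0;				/* pretend parsing succeeded */
	uint32_t tmp = !r ? destroyed : allocated;

	for (int i = 0; i < 16; ++i)		/* assumed handle count */
		if (tmp & (1u << i))
			printf("would clear handle slot %d\n", i);
	return 0;
}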
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
@@ -34,6 +34,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 			       bool direct, struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
 			     unsigned vm_id, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
  *
  * @adev: amdgpu device pointer
  * @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
  *
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if neccessary.
  */
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			      int (*validate)(void *p, struct amdgpu_bo *bo),
+			      void *param)
 {
 	uint64_t num_evictions;
 	unsigned i;
+	int r;
 
 	/* We only need to validate the page tables
 	 * if they aren't already valid.
 	 */
 	num_evictions = atomic64_read(&adev->num_evictions);
 	if (num_evictions == vm->last_eviction_counter)
-		return;
+		return 0;
 
 	/* add the vm page table to the list */
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		list_add(&entry->tv.head, duplicates);
+		r = validate(param, bo);
+		if (r)
+			return r;
 	}
 
+	return 0;
 }
 
 /**
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+		ttm_bo_move_to_lru_tail(&bo->tbo);
 	}
 	spin_unlock(&glob->lru_lock);
 }
@@ -341,9 +346,9 @@ error:
 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	const struct amdgpu_ip_block_version *ip_block;
+	const struct amdgpu_ip_block *ip_block;
 
-	if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
 		/* only compute rings */
 		return false;
 
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
 	if (!ip_block)
 		return false;
 
-	if (ip_block->major <= 7) {
+	if (ip_block->version->major <= 7) {
 		/* gfx7 has no workaround */
 		return true;
-	} else if (ip_block->major == 8) {
+	} else if (ip_block->version->major == 8) {
 		if (adev->gfx.mec_fw_version >= 673)
 			/* gfx8 is fixed in MEC firmware 673 */
 			return false;
@@ -612,123 +617,6 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 	return result;
 }
 
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
-					 struct amdgpu_vm *vm,
-					 bool shadow)
-{
-	struct amdgpu_ring *ring;
-	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
-		vm->page_directory;
-	uint64_t pd_addr;
-	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
-	uint64_t last_pde = ~0, last_pt = ~0;
-	unsigned count = 0, pt_idx, ndw;
-	struct amdgpu_job *job;
-	struct amdgpu_pte_update_params params;
-	struct fence *fence = NULL;
-
-	int r;
-
-	if (!pd)
-		return 0;
-
-	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
-	if (r)
-		return r;
-
-	pd_addr = amdgpu_bo_gpu_offset(pd);
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-
-	/* padding, etc. */
-	ndw = 64;
-
-	/* assume the worst case */
-	ndw += vm->max_pde_used * 6;
-
-	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
-	if (r)
-		return r;
-
-	memset(&params, 0, sizeof(params));
-	params.adev = adev;
-	params.ib = &job->ibs[0];
-
-	/* walk over the address space and update the page directory */
-	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
-		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
-		uint64_t pde, pt;
-
-		if (bo == NULL)
-			continue;
-
-		if (bo->shadow) {
-			struct amdgpu_bo *shadow = bo->shadow;
-
-			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
-			if (r)
-				return r;
-		}
-
-		pt = amdgpu_bo_gpu_offset(bo);
-		if (!shadow) {
-			if (vm->page_tables[pt_idx].addr == pt)
-				continue;
-			vm->page_tables[pt_idx].addr = pt;
-		} else {
-			if (vm->page_tables[pt_idx].shadow_addr == pt)
-				continue;
-			vm->page_tables[pt_idx].shadow_addr = pt;
-		}
-
-		pde = pd_addr + pt_idx * 8;
-		if (((last_pde + 8 * count) != pde) ||
-		    ((last_pt + incr * count) != pt) ||
-		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
-
-			if (count) {
-				amdgpu_vm_do_set_ptes(&params, last_pde,
-						      last_pt, count, incr,
-						      AMDGPU_PTE_VALID);
-			}
-
-			count = 1;
-			last_pde = pde;
-			last_pt = pt;
-		} else {
-			++count;
-		}
-	}
-
-	if (count)
-		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
-				      count, incr, AMDGPU_PTE_VALID);
-
-	if (params.ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, params.ib);
-		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
-				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(params.ib->length_dw > ndw);
-		r = amdgpu_job_submit(job, ring, &vm->entity,
-				      AMDGPU_FENCE_OWNER_VM, &fence);
-		if (r)
-			goto error_free;
-
-		amdgpu_bo_fence(pd, fence, true);
-		fence_put(vm->page_directory_fence);
-		vm->page_directory_fence = fence_get(fence);
-		fence_put(fence);
-
-	} else {
-		amdgpu_job_free(job);
-	}
-
-	return 0;
-
-error_free:
-	amdgpu_job_free(job);
-	return r;
-}
-
 /*
  * amdgpu_vm_update_pdes - make sure that page directory is valid
  *
@@ -744,12 +632,133 @@ error_free:
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm)
 {
+	struct amdgpu_bo *shadow;
+	struct amdgpu_ring *ring;
+	uint64_t pd_addr, shadow_addr;
+	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
+	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
+	unsigned count = 0, pt_idx, ndw;
+	struct amdgpu_job *job;
+	struct amdgpu_pte_update_params params;
+	struct fence *fence = NULL;
+
 	int r;
 
-	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
-	if (r)
-		return r;
-
-	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	shadow = vm->page_directory->shadow;
+
+	/* padding, etc. */
+	ndw = 64;
+
+	/* assume the worst case */
+	ndw += vm->max_pde_used * 6;
+
+	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+	if (shadow) {
+		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+		if (r)
+			return r;
+		shadow_addr = amdgpu_bo_gpu_offset(shadow);
+		ndw *= 2;
+	} else {
+		shadow_addr = 0;
+	}
+
+	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+	if (r)
+		return r;
+
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.ib = &job->ibs[0];
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
+		uint64_t pde, pt;
+
+		if (bo == NULL)
+			continue;
+
+		if (bo->shadow) {
+			struct amdgpu_bo *pt_shadow = bo->shadow;
+
+			r = amdgpu_ttm_bind(&pt_shadow->tbo,
+					    &pt_shadow->tbo.mem);
+			if (r)
+				return r;
+		}
+
+		pt = amdgpu_bo_gpu_offset(bo);
+		if (vm->page_tables[pt_idx].addr == pt)
+			continue;
+
+		vm->page_tables[pt_idx].addr = pt;
+
+		pde = pd_addr + pt_idx * 8;
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt) ||
+		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
+
+			if (count) {
+				if (shadow)
+					amdgpu_vm_do_set_ptes(&params,
+							      last_shadow,
+							      last_pt, count,
+							      incr,
+							      AMDGPU_PTE_VALID);
+
+				amdgpu_vm_do_set_ptes(&params, last_pde,
+						      last_pt, count, incr,
+						      AMDGPU_PTE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_shadow = shadow_addr + pt_idx * 8;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count) {
+		if (vm->page_directory->shadow)
+			amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+					      count, incr, AMDGPU_PTE_VALID);
+
+		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+				      count, incr, AMDGPU_PTE_VALID);
+	}
+
+	if (params.ib->length_dw == 0) {
+		amdgpu_job_free(job);
+		return 0;
+	}
+
+	amdgpu_ring_pad_ib(ring, params.ib);
+	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+			 AMDGPU_FENCE_OWNER_VM);
+	if (shadow)
+		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+				 AMDGPU_FENCE_OWNER_VM);
+
+	WARN_ON(params.ib->length_dw > ndw);
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &fence);
+	if (r)
+		goto error_free;
+
+	amdgpu_bo_fence(vm->page_directory, fence, true);
+	fence_put(vm->page_directory_fence);
+	vm->page_directory_fence = fence_get(fence);
+	fence_put(fence);
+
+	return 0;
+
+error_free:
+	amdgpu_job_free(job);
+	return r;
 }
 
 /**
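Note: one detail of the rewrite above worth calling out is that when a shadow page directory exists, every PDE write is issued twice, so the worst-case dword budget is doubled before the IB is allocated. A quick standalone check of that arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned max_pde_used = 100;	/* hypothetical */
	unsigned ndw = 64;		/* padding, etc. */
	int has_shadow = 1;		/* shadow PD present */

	ndw += max_pde_used * 6;	/* worst case per PDE */
	if (has_shadow)
		ndw *= 2;		/* writes go to both PDs */
	printf("IB budget: %u dwords (%u bytes)\n", ndw, ndw * 4);
	return 0;
}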
@@ -781,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* initialize the variables */
 	addr = start;
 	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = vm->page_tables[pt_idx].entry.robj;
+	pt = vm->page_tables[pt_idx].bo;
 	if (params->shadow) {
 		if (!pt->shadow)
 			return;
-		pt = vm->page_tables[pt_idx].entry.robj->shadow;
+		pt = pt->shadow;
 	}
 	if ((addr & ~mask) == (end & ~mask))
 		nptes = end - addr;
@@ -804,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* walk over the address space and update the page tables */
 	while (addr < end) {
 		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = vm->page_tables[pt_idx].entry.robj;
+		pt = vm->page_tables[pt_idx].bo;
 		if (params->shadow) {
 			if (!pt->shadow)
 				return;
-			pt = vm->page_tables[pt_idx].entry.robj->shadow;
+			pt = pt->shadow;
 		}
 
 		if ((addr & ~mask) == (end & ~mask))
@@ -1065,8 +1074,8 @@ error_free:
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
  * @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
  * @fence: optional resulting fence
  *
  * Split the mapping into smaller chunks so that each update fits
@@ -1079,12 +1088,11 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
-				      uint32_t flags, uint64_t addr,
+				      uint32_t flags,
+				      struct drm_mm_node *nodes,
 				      struct fence **fence)
 {
-	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
-	uint64_t src = 0, start = mapping->it.start;
+	uint64_t pfn, src = 0, start = mapping->it.start;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
|
|||
|
||||
trace_amdgpu_vm_bo_update(mapping);
|
||||
|
||||
pfn = mapping->offset >> PAGE_SHIFT;
|
||||
if (nodes) {
|
||||
while (pfn >= nodes->size) {
|
||||
pfn -= nodes->size;
|
||||
++nodes;
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
uint64_t max_entries;
|
||||
uint64_t addr, last;
|
||||
|
||||
if (nodes) {
|
||||
addr = nodes->start << PAGE_SHIFT;
|
||||
max_entries = (nodes->size - pfn) *
|
||||
(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
|
||||
} else {
|
||||
addr = 0;
|
||||
max_entries = S64_MAX;
|
||||
}
|
||||
|
||||
if (pages_addr) {
|
||||
if (flags == gtt_flags)
|
||||
src = adev->gart.table_addr + (addr >> 12) * 8;
|
||||
src = adev->gart.table_addr +
|
||||
(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
|
||||
else
|
||||
max_entries = min(max_entries, 16ull * 1024ull);
|
||||
addr = 0;
|
||||
} else if (flags & AMDGPU_PTE_VALID) {
|
||||
addr += adev->vm_manager.vram_base_offset;
|
||||
}
|
||||
addr += mapping->offset;
|
||||
addr += pfn << PAGE_SHIFT;
|
||||
|
||||
if (!pages_addr || src)
|
||||
return amdgpu_vm_bo_update_mapping(adev, exclusive,
|
||||
src, pages_addr, vm,
|
||||
start, mapping->it.last,
|
||||
flags, addr, fence);
|
||||
|
||||
while (start != mapping->it.last + 1) {
|
||||
uint64_t last;
|
||||
|
||||
last = min((uint64_t)mapping->it.last, start + max_size - 1);
|
||||
last = min((uint64_t)mapping->it.last, start + max_entries - 1);
|
||||
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
|
||||
src, pages_addr, vm,
|
||||
start, last, flags, addr,
|
||||
|
@@ -1121,9 +1146,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		if (r)
 			return r;
 
+		pfn += last - start + 1;
+		if (nodes && nodes->size == pfn) {
+			pfn = 0;
+			++nodes;
+		}
 		start = last + 1;
-		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
-	}
+
+	} while (unlikely(start != mapping->it.last + 1));
 
 	return 0;
 }
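Note: the do/while above is the heart of the non-contiguous VRAM support; a single mapping may now cross several drm_mm nodes, and the walk carries an intra-node pfn along. The same skip-and-advance arithmetic in a standalone sketch, with invented node sizes:

#include <stdint.h>
#include <stdio.h>

struct node { uint64_t start, size; };	/* stand-in for drm_mm_node */

int main(void)
{
	struct node nodes[] = { {0, 4}, {16, 8}, {64, 4} };	/* in pages */
	struct node *n = nodes;
	uint64_t pfn = 6;	/* mapping->offset in pages */

	while (pfn >= n->size) {	/* skip fully covered nodes */
		pfn -= n->size;
		++n;
	}
	/* the mapping starts 'pfn' pages into this node */
	printf("start node @%llu, intra-node pfn %llu\n",
	       (unsigned long long)n->start, (unsigned long long)pfn);
	return 0;
}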
@@ -1147,40 +1177,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	dma_addr_t *pages_addr = NULL;
 	uint32_t gtt_flags, flags;
 	struct ttm_mem_reg *mem;
+	struct drm_mm_node *nodes;
 	struct fence *exclusive;
-	uint64_t addr;
 	int r;
 
 	if (clear) {
 		mem = NULL;
-		addr = 0;
+		nodes = NULL;
 		exclusive = NULL;
 	} else {
 		struct ttm_dma_tt *ttm;
 
 		mem = &bo_va->bo->tbo.mem;
-		addr = (u64)mem->start << PAGE_SHIFT;
-		switch (mem->mem_type) {
-		case TTM_PL_TT:
+		nodes = mem->mm_node;
+		if (mem->mem_type == TTM_PL_TT) {
 			ttm = container_of(bo_va->bo->tbo.ttm, struct
 					   ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
-			break;
-
-		case TTM_PL_VRAM:
-			addr += adev->vm_manager.vram_base_offset;
-			break;
-
-		default:
-			break;
 		}
 
 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-		     adev == bo_va->bo->adev) ? flags : 0;
+		     adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1210,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
 					       gtt_flags, pages_addr, vm,
-					       mapping, flags, addr,
+					       mapping, flags, nodes,
 					       &bo_va->last_pt_update);
 		if (r)
 			return r;
@@ -1405,18 +1425,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
-		struct amdgpu_bo_list_entry *entry;
 		struct amdgpu_bo *pt;
 
-		entry = &vm->page_tables[pt_idx].entry;
-		if (entry->robj)
+		if (vm->page_tables[pt_idx].bo)
 			continue;
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-				     AMDGPU_GEM_CREATE_SHADOW,
+				     AMDGPU_GEM_CREATE_SHADOW |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, resv, &pt);
 		if (r)
 			goto error_free;
@@ -1442,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 			}
 		}
 
-		entry->robj = pt;
-		entry->priority = 0;
-		entry->tv.bo = &entry->robj->tbo;
-		entry->tv.shared = true;
-		entry->user_pages = NULL;
+		vm->page_tables[pt_idx].bo = pt;
 		vm->page_tables[pt_idx].addr = 0;
 	}
 
@@ -1626,7 +1641,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-			     AMDGPU_GEM_CREATE_SHADOW,
+			     AMDGPU_GEM_CREATE_SHADOW |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		goto error_free_sched_entity;
@@ -1697,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+		struct amdgpu_bo *pt = vm->page_tables[i].bo;
 
 		if (!pt)
 			continue;
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM	16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID	(1 << 0)
+#define AMDGPU_PTE_SYSTEM	(1 << 1)
+#define AMDGPU_PTE_SNOOPED	(1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE	(1 << 4)
+
+#define AMDGPU_PTE_READABLE	(1 << 5)
+#define AMDGPU_PTE_WRITEABLE	(1 << 6)
+
+#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
+
+/* How to programm VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER	0
+#define AMDGPU_VM_FAULT_STOP_FIRST	1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
+
+struct amdgpu_vm_pt {
+	struct amdgpu_bo	*bo;
+	uint64_t		addr;
+};
+
+struct amdgpu_vm {
+	/* tree of virtual addresses mapped */
+	struct rb_root		va;
+
+	/* protecting invalidated */
+	spinlock_t		status_lock;
+
+	/* BOs moved, but not yet updated in the PT */
+	struct list_head	invalidated;
+
+	/* BOs cleared in the PT because of a move */
+	struct list_head	cleared;
+
+	/* BO mappings freed, but not yet updated in the PT */
+	struct list_head	freed;
+
+	/* contains the page directory */
+	struct amdgpu_bo	*page_directory;
+	unsigned		max_pde_used;
+	struct fence		*page_directory_fence;
+	uint64_t		last_eviction_counter;
+
+	/* array of page tables, one for each page directory entry */
+	struct amdgpu_vm_pt	*page_tables;
+
+	/* for id and flush management per ring */
+	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
+
+	/* protecting freed */
+	spinlock_t		freed_lock;
+
+	/* Scheduler entity for page table updates */
+	struct amd_sched_entity	entity;
+
+	/* client id */
+	u64                     client_id;
+};
+
+struct amdgpu_vm_id {
+	struct list_head	list;
+	struct fence		*first;
+	struct amdgpu_sync	active;
+	struct fence		*last_flush;
+	atomic64_t		owner;
+
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct fence		*flushed_updates;
+
+	uint32_t                current_gpu_reset_count;
+
+	uint32_t		gds_base;
+	uint32_t		gds_size;
+	uint32_t		gws_base;
+	uint32_t		gws_size;
+	uint32_t		oa_base;
+	uint32_t		oa_size;
+};
+
+struct amdgpu_vm_manager {
+	/* Handling of VMIDs */
+	struct mutex				lock;
+	unsigned				num_ids;
+	struct list_head			ids_lru;
+	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
+
+	/* Handling of VM fences */
+	u64					fence_context;
+	unsigned				seqno[AMDGPU_MAX_RINGS];
+
+	uint32_t				max_pfn;
+	/* vram base address for page table entry */
+	u64					vram_base_offset;
+	/* is vm enabled? */
+	bool					enabled;
+	/* vm pte handling */
+	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
+	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
+	unsigned				vm_pte_num_rings;
+	atomic_t				vm_pte_next_ring;
+	/* client id counter */
+	atomic64_t				client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+			 struct list_head *validated,
+			 struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			      int (*callback)(void *p, struct amdgpu_bo *bo),
+			      void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+				  struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+
+#endif
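Note: the AMDGPU_VM_PTE_COUNT macro in the new header depends on the amdgpu_vm_block_size module parameter. Assuming a block size of 9 bits purely for illustration (not the driver's actual default), one page table covers 2 MiB of address space:

#include <stdio.h>

int main(void)
{
	const unsigned block_size = 9;	/* assumed value for illustration */
	const unsigned pte_count = 1u << block_size;	/* AMDGPU_VM_PTE_COUNT */

	printf("%u PTEs per table, %u KiB of VA per PDE\n",
	       pte_count, pte_count * 4096 / 1024);
	return 0;
}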
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_vram_mgr {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of VRAM
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
+				unsigned long p_size)
+{
+	struct amdgpu_vram_mgr *mgr;
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return -ENOMEM;
+
+	drm_mm_init(&mgr->mm, 0, p_size);
+	spin_lock_init(&mgr->lock);
+	man->priv = mgr;
+	return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	if (!drm_mm_clean(&mgr->mm)) {
+		spin_unlock(&mgr->lock);
+		return -EBUSY;
+	}
+
+	drm_mm_takedown(&mgr->mm);
+	spin_unlock(&mgr->lock);
+	kfree(mgr);
+	man->priv = NULL;
+	return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate VRAM for the given BO.
+ */
+static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *tbo,
+			       const struct ttm_place *place,
+			       struct ttm_mem_reg *mem)
+{
+	struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
+	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct drm_mm *mm = &mgr->mm;
+	struct drm_mm_node *nodes;
+	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
+	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+	unsigned i;
+	int r;
+
+	lpfn = place->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+
+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
+	    amdgpu_vram_page_split == -1) {
+		pages_per_node = ~0ul;
+		num_nodes = 1;
+	} else {
+		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
+				     mem->page_alignment);
+		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+	}
+
+	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
+
+	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+		sflags = DRM_MM_SEARCH_BELOW;
+		aflags = DRM_MM_CREATE_TOP;
+	}
+
+	pages_left = mem->num_pages;
+
+	spin_lock(&mgr->lock);
+	for (i = 0; i < num_nodes; ++i) {
+		unsigned long pages = min(pages_left, pages_per_node);
+		uint32_t alignment = mem->page_alignment;
+
+		if (pages == pages_per_node)
+			alignment = pages_per_node;
+		else
+			sflags |= DRM_MM_SEARCH_BEST;
+
+		r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
+							alignment, 0,
+							place->fpfn, lpfn,
+							sflags, aflags);
+		if (unlikely(r))
+			goto error;
+
+		pages_left -= pages;
+	}
+	spin_unlock(&mgr->lock);
+
+	mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
+	mem->mm_node = nodes;
+
+	return 0;
+
+error:
+	while (i--)
+		drm_mm_remove_node(&nodes[i]);
+	spin_unlock(&mgr->lock);
+
+	kfree(nodes);
+	return r == -ENOSPC ? 0 : r;
+}
+
+/**
+ * amdgpu_vram_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: TTM memory object
+ *
+ * Free the allocated VRAM again.
+ */
+static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct drm_mm_node *nodes = mem->mm_node;
+	unsigned pages = mem->num_pages;
+
+	if (!mem->mm_node)
+		return;
+
+	spin_lock(&mgr->lock);
+	while (pages) {
+		pages -= nodes->size;
+		drm_mm_remove_node(nodes);
+		++nodes;
+	}
+	spin_unlock(&mgr->lock);
+
+	kfree(mem->mm_node);
+	mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_vram_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+				  const char *prefix)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	drm_mm_debug_table(&mgr->mm, prefix);
+	spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
+	amdgpu_vram_mgr_init,
+	amdgpu_vram_mgr_fini,
+	amdgpu_vram_mgr_new,
+	amdgpu_vram_mgr_del,
+	amdgpu_vram_mgr_debug
+};
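Note: the func table at the end of the new file is what TTM actually consumes. A hedged sketch of how it would be wired up for the VRAM domain; the real hookup lives in amdgpu_init_mem_type(), which is not part of this diff, so the surrounding function shape here is an assumption:

/* sketch: registering the new manager for TTM_PL_VRAM */
static int wire_up_vram_mgr(struct ttm_mem_type_manager *man,
			    unsigned long vram_pages)
{
	man->func = &amdgpu_vram_mgr_func;	/* table exported above */
	return (*man->func->init)(man, vram_pages);	/* amdgpu_vram_mgr_init */
}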
@@ -31,6 +31,7 @@
 #include "atom.h"
 #include "atom-bits.h"
 #include "atombios_encoders.h"
+#include "atombios_crtc.h"
 #include "amdgpu_atombios.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 
-	if (pi->uvd_power_gated == gate)
-		return;
-
 	pi->uvd_power_gated = gate;
 
 	ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
 		sclk = ps->performance_levels[0].sclk;
 	}
 
+	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+		sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
 	if (rps->vce_active) {
 		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
 			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
 	struct ci_power_info *pi = ci_get_pi(adev);
 	int i, ret;
 
+	if (amdgpu_ci_is_smc_running(adev)) {
+		DRM_INFO("smc is running, no need to load smc firmware\n");
+		return 0;
+	}
+
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
 			break;
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 	u32 tmp;
+	int ret = 0;
 
 	if (!gate) {
+		/* turn the clocks on when decoding */
+		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						   AMD_CG_STATE_UNGATE);
+		if (ret)
+			return ret;
+
 		if (pi->caps_uvd_dpm ||
 		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
 			pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
 		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
 		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
 		WREG32_SMC(ixDPM_TABLE_475, tmp);
+		ret = ci_enable_uvd_dpm(adev, true);
+	} else {
+		ret = ci_enable_uvd_dpm(adev, false);
+		if (ret)
+			return ret;
+
+		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						   AMD_CG_STATE_GATE);
 	}
 
-	return ci_enable_uvd_dpm(adev, !gate);
+	return ret;
 }
 
 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 
 			ret = ci_enable_vce_dpm(adev, true);
 		} else {
-			ret = ci_enable_vce_dpm(adev, false);
-			if (ret)
-				return ret;
 			/* turn the clocks off when not encoding */
 			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
 							   AMD_CG_STATE_GATE);
+			if (ret)
+				return ret;
+
+			ret = ci_enable_vce_dpm(adev, false);
 		}
 	}
 	return ret;
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
 	pi->current_rps = *rps;
 	pi->current_ps = *new_ps;
 	pi->current_rps.ps_priv = &pi->current_ps;
+	adev->pm.dpm.current_ps = &pi->current_rps;
 }
 
 static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
 	pi->requested_rps = *rps;
 	pi->requested_ps = *new_ps;
 	pi->requested_rps.ps_priv = &pi->requested_ps;
+	adev->pm.dpm.requested_ps = &pi->requested_rps;
 }
 
 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
 	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
 	int ret;
 
-	if (amdgpu_ci_is_smc_running(adev))
-		return -EINVAL;
 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
 		ci_enable_voltage_control(adev);
 		ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
 	adev->pm.dpm.num_ps = state_array->ucNumEntries;
 
 	/* fill in the vce power states */
-	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 		u32 sclk, mclk;
 		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
 		clock_info = (union pplib_clock_info *)
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
 	amdgpu_dpm_print_ps_status(adev, rps);
 }
 
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+						const struct ci_pl *ci_cpl2)
+{
+	return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+		(ci_cpl1->sclk == ci_cpl2->sclk) &&
+		(ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+		(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	struct ci_ps *ci_cps;
+	struct ci_ps *ci_rps;
+	int i;
+
+	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+		return -EINVAL;
+
+	ci_cps = ci_get_ps(cps);
+	ci_rps = ci_get_ps(rps);
+
+	if (ci_cps == NULL) {
+		*equal = false;
+		return 0;
+	}
+
+	if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < ci_cps->performance_level_count; i++) {
+		if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+					&(ci_rps->performance_levels[i]))) {
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+	return 0;
+}
+
 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
@@ -6287,12 +6359,19 @@ static int ci_dpm_suspend(void *handle)
 
 	if (adev->pm.dpm_enabled) {
 		mutex_lock(&adev->pm.mutex);
-		/* disable dpm */
-		ci_dpm_disable(adev);
-		/* reset the power state */
-		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+		adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+		adev->pm.dpm.last_state = adev->pm.dpm.state;
+		adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+		adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
 		mutex_unlock(&adev->pm.mutex);
+		amdgpu_pm_compute_clocks(adev);
+
 	}
 
 	return 0;
 }
@@ -6310,6 +6389,8 @@ static int ci_dpm_resume(void *handle)
 			adev->pm.dpm_enabled = false;
 		else
 			adev->pm.dpm_enabled = true;
+		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+		adev->pm.dpm.state = adev->pm.dpm.last_state;
 		mutex_unlock(&adev->pm.mutex);
 		if (adev->pm.dpm_enabled)
 			amdgpu_pm_compute_clocks(adev);
@@ -6644,6 +6725,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
 	.set_sclk_od = ci_dpm_set_sclk_od,
 	.get_mclk_od = ci_dpm_get_mclk_od,
 	.set_mclk_od = ci_dpm_set_mclk_od,
+	.check_state_equal = ci_check_state_equal,
+	.get_vce_clock_state = amdgpu_get_vce_clock_state,
 };
 
 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6662,3 +6745,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
 	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &ci_dpm_ip_funcs,
+};
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 	return r;
 }
 
-static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
-	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
-	if (hung)
-		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-	else
-		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
-	WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
 /**
  * cik_asic_reset - soft reset GPU
  *
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
 static int cik_asic_reset(struct amdgpu_device *adev)
 {
 	int r;
-	cik_set_bios_scratch_engine_hung(adev, true);
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	r = cik_gpu_pci_config_reset(adev);
 
-	cik_set_bios_scratch_engine_hung(adev, false);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return r;
 }
@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
|
|||
adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
|
||||
}
|
||||
|
||||
static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
|
||||
{
|
||||
/* ORDER MATTERS! */
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_COMMON,
|
||||
.major = 1,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_common_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_GMC,
|
||||
.major = 7,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &gmc_v7_0_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_IH,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_ih_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_SMC,
|
||||
.major = 7,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &amdgpu_pp_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_DCE,
|
||||
.major = 8,
|
||||
.minor = 2,
|
||||
.rev = 0,
|
||||
.funcs = &dce_v8_0_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_GFX,
|
||||
.major = 7,
|
||||
.minor = 2,
|
||||
.rev = 0,
|
||||
.funcs = &gfx_v7_0_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_SDMA,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_sdma_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_UVD,
|
||||
.major = 4,
|
||||
.minor = 2,
|
||||
.rev = 0,
|
||||
.funcs = &uvd_v4_2_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_VCE,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &vce_v2_0_ip_funcs,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
|
||||
{
|
||||
/* ORDER MATTERS! */
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_COMMON,
|
||||
.major = 1,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_common_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_GMC,
|
||||
.major = 7,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &gmc_v7_0_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_IH,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_ih_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_SMC,
|
||||
.major = 7,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &amdgpu_pp_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_DCE,
|
||||
.major = 8,
|
||||
.minor = 2,
|
||||
.rev = 0,
|
||||
.funcs = &dce_virtual_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_GFX,
|
||||
.major = 7,
|
||||
.minor = 2,
|
||||
.rev = 0,
|
||||
.funcs = &gfx_v7_0_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_SDMA,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_sdma_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_UVD,
|
||||
.major = 4,
|
||||
.minor = 2,
|
||||
.rev = 0,
|
||||
.funcs = &uvd_v4_2_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_VCE,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &vce_v2_0_ip_funcs,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
|
||||
{
|
||||
/* ORDER MATTERS! */
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_COMMON,
|
||||
.major = 1,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_common_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_GMC,
|
||||
.major = 7,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &gmc_v7_0_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_IH,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_ih_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_SMC,
|
||||
.major = 7,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &amdgpu_pp_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_DCE,
|
||||
.major = 8,
|
||||
.minor = 5,
|
||||
.rev = 0,
|
||||
.funcs = &dce_v8_0_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_GFX,
|
||||
.major = 7,
|
||||
.minor = 3,
|
||||
.rev = 0,
|
||||
.funcs = &gfx_v7_0_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_SDMA,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &cik_sdma_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_UVD,
|
||||
.major = 4,
|
||||
.minor = 2,
|
||||
.rev = 0,
|
||||
.funcs = &uvd_v4_2_ip_funcs,
|
||||
},
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_VCE,
|
||||
.major = 2,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &vce_v2_0_ip_funcs,
|
||||
},
|
||||
};

static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 3,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 3,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 3,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 3,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 3,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_virtual_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};

int cik_set_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		switch (adev->asic_type) {
		case CHIP_BONAIRE:
			adev->ip_blocks = bonaire_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
			break;
		case CHIP_HAWAII:
			adev->ip_blocks = hawaii_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
			break;
		case CHIP_KAVERI:
			adev->ip_blocks = kaveri_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
			break;
		case CHIP_KABINI:
			adev->ip_blocks = kabini_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
			break;
		case CHIP_MULLINS:
			adev->ip_blocks = mullins_ip_blocks_vd;
			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
			break;
		default:
			/* FIXME: not supported yet */
			return -EINVAL;
		}
	} else {
		switch (adev->asic_type) {
		case CHIP_BONAIRE:
			adev->ip_blocks = bonaire_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
			break;
		case CHIP_HAWAII:
			adev->ip_blocks = hawaii_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
			break;
		case CHIP_KAVERI:
			adev->ip_blocks = kaveri_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
			break;
		case CHIP_KABINI:
			adev->ip_blocks = kabini_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
			break;
		case CHIP_MULLINS:
			adev->ip_blocks = mullins_ip_blocks;
			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
			break;
		default:
			/* FIXME: not supported yet */
			return -EINVAL;
		}
	}

	return 0;
}
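
The function above is the table-per-ASIC pattern this commit removes: each chip owns a static array of IP block versions (with a near-duplicate *_vd array for virtual display), and selection is a pointer plus ARRAY_SIZE. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real amdgpu structs:

/* Sketch only: stand-in types, not the driver's definitions. */
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum chip { CHIP_A, CHIP_B };
struct ip_block_version { int type, major, minor, rev; };

static const struct ip_block_version chip_a_blocks[] = {
	{ 0, 1, 0, 0 },	/* COMMON v1.0 */
	{ 1, 7, 0, 0 },	/* GMC v7.0 */
};

static int set_ip_blocks(enum chip c,
			 const struct ip_block_version **blocks,
			 size_t *num)
{
	switch (c) {
	case CHIP_A:
		*blocks = chip_a_blocks;
		*num = ARRAY_SIZE(chip_a_blocks);
		return 0;
	default:
		return -1;	/* not supported */
	}
}

The drawback visible above is duplication: one extra DCE entry forces a second full nine-entry array per chip.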

static const struct amdgpu_asic_funcs cik_asic_funcs =
{
	.read_disabled_bios = &cik_read_disabled_bios,

@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs cik_common_ip_funcs = {
static const struct amd_ip_funcs cik_common_ip_funcs = {
	.name = "cik_common",
	.early_init = cik_common_early_init,
	.late_init = NULL,

@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
	.set_clockgating_state = cik_common_set_clockgating_state,
	.set_powergating_state = cik_common_set_powergating_state,
};

static const struct amdgpu_ip_block_version cik_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_common_ip_funcs,
};

int cik_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		amdgpu_ip_block_add(adev, &cik_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
		break;
	case CHIP_HAWAII:
		amdgpu_ip_block_add(adev, &cik_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
		break;
	case CHIP_KAVERI:
		amdgpu_ip_block_add(adev, &cik_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		amdgpu_ip_block_add(adev, &cik_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}
	return 0;
}

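The rewritten cik_set_ip_blocks above registers blocks one at a time with amdgpu_ip_block_add, so the virtual-display choice collapses to a single if/else instead of a duplicated *_vd array. A minimal standalone sketch of that builder style (stand-in types; the fixed capacity here is illustrative, not the driver's actual limit):

/* Sketch only: stand-in types, not the driver's definitions. */
struct ip_block_version { int type, major, minor, rev; };

struct device_ctx {
	const struct ip_block_version *ip_blocks[16];
	int num_ip_blocks;
};

static int ip_block_add(struct device_ctx *dev,
			const struct ip_block_version *v)
{
	if (dev->num_ip_blocks >= 16)
		return -1;
	/* Order matters: blocks are brought up in insertion order. */
	dev->ip_blocks[dev->num_ip_blocks++] = v;
	return 0;
}

Each IP driver now exports one amdgpu_ip_block_version object (the cik_common_ip_block pattern above), which is why the funcs tables can become static.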
@@ -24,8 +24,6 @@
#ifndef __CIK_H__
#define __CIK_H__

extern const struct amd_ip_funcs cik_common_ip_funcs;

void cik_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);

@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amd_ip_funcs cik_ih_ip_funcs = {
	.name = "cik_ih",
	.early_init = cik_ih_early_init,
	.late_init = NULL,

@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &cik_ih_funcs;
}

const struct amdgpu_ip_block_version cik_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_ih_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __CIK_IH_H__
#define __CIK_IH_H__

extern const struct amd_ip_funcs cik_ih_ip_funcs;
extern const struct amdgpu_ip_block_version cik_ih_ip_block;

#endif

@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->nop |
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_NOP_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->nop);
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**

@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		7 + 4; /* cik_sdma_ring_emit_ib */
}

static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		6 + /* cik_sdma_ring_emit_hdp_flush */
		3 + /* cik_sdma_ring_emit_hdp_invalidate */
		6 + /* cik_sdma_ring_emit_pipeline_sync */
		12 + /* cik_sdma_ring_emit_vm_flush */
		9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
}

static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{

@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle)
		ring->ring_obj = NULL;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
				     AMDGPU_RING_TYPE_SDMA);
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs cik_sdma_ip_funcs = {
static const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.name = "cik_sdma",
	.early_init = cik_sdma_early_init,
	.late_init = NULL,

@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
};

static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.parse_cs = NULL,
	.emit_frame_size =
		6 + /* cik_sdma_ring_emit_hdp_flush */
		3 + /* cik_sdma_ring_emit_hdp_invalidate */
		6 + /* cik_sdma_ring_emit_pipeline_sync */
		12 + /* cik_sdma_ring_emit_vm_flush */
		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,

@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.test_ib = cik_sdma_ring_test_ib,
	.insert_nop = cik_sdma_ring_insert_nop,
	.pad_ib = cik_sdma_ring_pad_ib,
	.get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
	.get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)

@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version cik_sdma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_sdma_ip_funcs,
};

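The cik_sdma.c hunks above fold the get_emit_ib_size/get_dma_frame_size callbacks into constant emit_frame_size/emit_ib_size fields of amdgpu_ring_funcs, alongside the ring type, align mask, and nop packet. A standalone sketch of sizing ring space from data rather than callbacks (simplified types; the dword counts are the ones quoted above):

/* Sketch only: stand-in types, not the driver's definitions. */
struct ring_funcs {
	unsigned emit_frame_size;	/* worst-case dwords per submission */
	unsigned emit_ib_size;		/* dwords per indirect-buffer packet */
};

static const struct ring_funcs sdma_ring_funcs = {
	.emit_frame_size = 6 + 3 + 6 + 12 + 9 + 9 + 9,
	.emit_ib_size = 7 + 4,
};

/* How many dwords must be reserved before emitting a frame with n IBs. */
static unsigned frame_dwords(const struct ring_funcs *f, unsigned num_ibs)
{
	return f->emit_frame_size + num_ibs * f->emit_ib_size;
}

Making these plain fields lets common ring code compute reservations without an indirect call per submission.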
@@ -24,6 +24,6 @@
#ifndef __CIK_SDMA_H__
#define __CIK_SDMA_H__

extern const struct amd_ip_funcs cik_sdma_ip_funcs;
extern const struct amdgpu_ip_block_version cik_sdma_ip_block;

#endif

@@ -43,6 +43,14 @@
#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c)
#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c)

/* hpd instance offsets */
#define HPD0_REGISTER_OFFSET (0x1807 - 0x1807)
#define HPD1_REGISTER_OFFSET (0x180a - 0x1807)
#define HPD2_REGISTER_OFFSET (0x180d - 0x1807)
#define HPD3_REGISTER_OFFSET (0x1810 - 0x1807)
#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)

#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003

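The new HPDn_REGISTER_OFFSET defines express each hot-plug-detect instance as a delta from instance 0, so any per-instance register can be addressed as base-register-of-instance-0 plus offset. A standalone sketch of that addressing idiom (reg_read and the stride value are illustrative stand-ins, not the driver's RREG32 or real register map):

/* Sketch only: stand-in register helper and addresses. */
#define HPD0_BASE	0x1807
#define HPD_STRIDE	0x3	/* e.g. 0x180a - 0x1807 above */

static const unsigned hpd_offsets[6] = {
	0 * HPD_STRIDE, 1 * HPD_STRIDE, 2 * HPD_STRIDE,
	3 * HPD_STRIDE, 4 * HPD_STRIDE, 5 * HPD_STRIDE,
};

extern unsigned reg_read(unsigned reg);	/* stand-in for RREG32 */

static unsigned hpd_status(unsigned hpd)
{
	return reg_read(HPD0_BASE + hpd_offsets[hpd]);
}

Every DCE file touched below uses this table to delete a six-arm switch per register access.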
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev,

	pi->current_ps = *ps;
	pi->current_rps = *rps;
	pi->current_rps.ps_priv = ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;

}

@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev,

	pi->requested_ps = *ps;
	pi->requested_rps = *rps;
	pi->requested_rps.ps_priv = ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;

}

@@ -2257,6 +2259,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
	}
}

static int cz_check_state_equal(struct amdgpu_device *adev,
				struct amdgpu_ps *cps,
				struct amdgpu_ps *rps,
				bool *equal)
{
	if (equal == NULL)
		return -EINVAL;

	*equal = false;
	return 0;
}

const struct amd_ip_funcs cz_dpm_ip_funcs = {
	.name = "cz_dpm",
	.early_init = cz_dpm_early_init,

@@ -2289,6 +2303,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
	.vblank_too_short = NULL,
	.powergate_uvd = cz_dpm_powergate_uvd,
	.powergate_vce = cz_dpm_powergate_vce,
	.check_state_equal = cz_check_state_equal,
};

static void cz_dpm_set_funcs(struct amdgpu_device *adev)

@@ -2296,3 +2311,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev)
	if (NULL == adev->pm.funcs)
		adev->pm.funcs = &cz_dpm_funcs;
}

const struct amdgpu_ip_block_version cz_dpm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &cz_dpm_ip_funcs,
};

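The first two cz_dpm.c hunks above are a pointer-lifetime fix: after copying the power state into driver-owned storage (pi->current_ps), ps_priv must point at that copy rather than at the caller's source state, which may not outlive it. A standalone sketch of the fix with simplified stand-in types:

/* Sketch only: stand-in types, not the driver's definitions. */
struct ps { int level; };
struct rps { void *ps_priv; };

struct pi {
	struct ps current_ps;
	struct rps current_rps;
};

static void update_current_ps(struct pi *pi, const struct ps *ps,
			      const struct rps *rps)
{
	pi->current_ps = *ps;		/* take an owned copy */
	pi->current_rps = *rps;		/* this also copies ps_priv... */
	pi->current_rps.ps_priv = &pi->current_ps; /* ...so repoint it;
						      was: (void *)ps */
}

The cz_check_state_equal addition in the third hunk always reports "not equal", which forces a state transition in the new balanced-mode autoswitch path rather than skipping it.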
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amd_ip_funcs cz_ih_ip_funcs = {
	.name = "cz_ih",
	.early_init = cz_ih_early_init,
	.late_init = NULL,

@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
		adev->irq.ih_funcs = &cz_ih_funcs;
}

const struct amdgpu_ip_block_version cz_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &cz_ih_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __CZ_IH_H__
#define __CZ_IH_H__

extern const struct amd_ip_funcs cz_ih_ip_funcs;
extern const struct amdgpu_ip_block_version cz_ih_ip_block;

#endif /* __CZ_IH_H__ */

@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "dce_v10_0.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
				enum amdgpu_hpd_id hpd)
{
	int idx;
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
	if (hpd >= adev->mode_info.num_hpd)
		return connected;
	}

	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
		connected = true;

@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
{
	u32 tmp;
	bool connected = dce_v10_0_hpd_sense(adev, hpd);
	int idx;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
	if (hpd >= adev->mode_info.num_hpd)
		return;
	}

	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
	else
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**

@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;
		}

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {

@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_CONNECT_INT_DELAY,
				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_DISCONNECT_INT_DELAY,
				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq,

@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);

@@ -3554,7 +3471,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs dce_v10_0_ip_funcs = {
static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
	.name = "dce_v10_0",
	.early_init = dce_v10_0_early_init,
	.late_init = NULL,

@@ -3885,3 +3802,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v10_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v10_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 10,
	.minor = 1,
	.rev = 0,
	.funcs = &dce_v10_0_ip_funcs,
};

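The dce_v10_0.c hunks above show the HPD refactor in its simplest form: a six-arm switch that mapped AMDGPU_HPD_n to an array index is replaced by using the enum value directly, guarded by a single bounds check against num_hpd. A standalone sketch of the shape of that change (all names and register values here are illustrative stand-ins):

/* Sketch only: stand-in types and register values. */
enum hpd_id { HPD_1, HPD_2, HPD_3, HPD_4, HPD_5, HPD_6 };

extern unsigned reg_read(unsigned reg);	/* stand-in for RREG32 */

static const unsigned hpd_offsets[6] = { 0x0, 0x3, 0x6, 0x9, 0xc, 0xf };

#define HPD_INT_STATUS	0x1807
#define HPD_SENSE_MASK	0x2

static int hpd_sense(enum hpd_id hpd, unsigned num_hpd)
{
	if (hpd >= num_hpd)	/* replaces the per-case switch */
		return 0;
	return !!(reg_read(HPD_INT_STATUS + hpd_offsets[hpd]) &
		  HPD_SENSE_MASK);
}

The bounds check also subsumes the old default: arm, so chips with fewer than six HPD pins are handled by data (num_hpd) instead of by code. Note also the two ip_block definitions above sharing one dce_v10_0_ip_funcs table: the minor number alone distinguishes DCE 10.0 from 10.1.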
@@ -24,7 +24,9 @@
#ifndef __DCE_V10_0_H__
#define __DCE_V10_0_H__

extern const struct amd_ip_funcs dce_v10_0_ip_funcs;

extern const struct amdgpu_ip_block_version dce_v10_0_ip_block;
extern const struct amdgpu_ip_block_version dce_v10_1_ip_block;

void dce_v10_0_disable_dce(struct amdgpu_device *adev);

@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "dce_v11_0.h"

#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"

@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
				enum amdgpu_hpd_id hpd)
{
	int idx;
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
	if (hpd >= adev->mode_info.num_hpd)
		return connected;
	}

	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
		connected = true;

@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
{
	u32 tmp;
	bool connected = dce_v11_0_hpd_sense(adev, hpd);
	int idx;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
	if (hpd >= adev->mode_info.num_hpd)
		return;
	}

	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
	else
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**

@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;
		}

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {

@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_CONNECT_INT_DELAY,
				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_DISCONNECT_INT_DELAY,
				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);

@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}

@@ -3611,7 +3528,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs dce_v11_0_ip_funcs = {
static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
	.name = "dce_v11_0",
	.early_init = dce_v11_0_early_init,
	.late_init = NULL,

@@ -3941,3 +3858,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v11_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v11_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 11,
	.minor = 2,
	.rev = 0,
	.funcs = &dce_v11_0_ip_funcs,
};

@@ -24,7 +24,8 @@
#ifndef __DCE_V11_0_H__
#define __DCE_V11_0_H__

extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;

void dce_v11_0_disable_dce(struct amdgpu_device *adev);

@@ -46,6 +46,16 @@ static const u32 crtc_offsets[6] =
	SI_CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS,
	DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS,
	DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS,
	DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS,
	DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS,
	DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS,
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,

@@ -94,15 +104,6 @@ static const struct {
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const uint32_t hpd_int_control_offsets[6] = {
	DC_HPD1_INT_CONTROL,
	DC_HPD2_INT_CONTROL,
	DC_HPD3_INT_CONTROL,
	DC_HPD4_INT_CONTROL,
	DC_HPD5_INT_CONTROL,
	DC_HPD6_INT_CONTROL,
};

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{

@@ -257,34 +258,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE)
		connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

@@ -303,58 +281,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPDx_INT_POLARITY;
	else
		tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
	WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**

@@ -369,34 +304,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPDx_EN;
		WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {

@@ -405,34 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;

			switch (amdgpu_connector->hpd.hpd) {
			case AMDGPU_HPD_1:
				dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
				break;
			case AMDGPU_HPD_2:
				dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
				break;
			case AMDGPU_HPD_3:
				dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
				break;
			case AMDGPU_HPD_4:
				dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
				break;
			case AMDGPU_HPD_5:
				dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
				break;
			case AMDGPU_HPD_6:
				dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
				break;
			default:
				continue;
			}

			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
			dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
			tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPDx_INT_EN;
			WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

@@ -454,32 +347,18 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPDx_EN;
		WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

@@ -617,6 +496,49 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,

}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		num_crtc = 6;
		break;
	case CHIP_OLAND:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/*Disable VGA render and enabled crtc, if has DCE engine*/
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/*Disable crtc*/
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) &
				EVERGREEN_CRTC_MASTER_EN;
			if (crtc_enabled) {
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~EVERGREEN_CRTC_MASTER_EN;
				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{

@@ -2338,21 +2260,20 @@ static int dce_v6_0_early_init(void *handle)
	dce_v6_0_set_display_funcs(adev);
	dce_v6_0_set_irq_funcs(adev);

	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

@@ -2588,42 +2509,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
	u32 dc_hpd_int_cntl;

	switch (type) {
	case AMDGPU_HPD_1:
		dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
		break;
	case AMDGPU_HPD_2:
		dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
		break;
	case AMDGPU_HPD_3:
		dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
		break;
	case AMDGPU_HPD_4:
		dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
		break;
	case AMDGPU_HPD_5:
		dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
		break;
	case AMDGPU_HPD_6:
		dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
		break;
	default:
	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hdp %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	default:
		break;

@@ -2796,7 +2698,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, int_control, tmp;
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {

@@ -2807,12 +2709,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;
	int_control = hpd_int_control_offsets[hpd];

	if (disp_int & mask) {
		tmp = RREG32(int_control);
		tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(int_control, tmp);
		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_INFO("IH: HPD%d\n", hpd + 1);
	}

@@ -2833,7 +2734,7 @@ static int dce_v6_0_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs dce_v6_0_ip_funcs = {
static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
	.name = "dce_v6_0",
	.early_init = dce_v6_0_early_init,
	.late_init = NULL,

@@ -3174,3 +3075,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v6_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 4,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};

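Beyond the HPD table refactor, the dce_v6_0.c hunks above factor the per-ASIC CRTC count into dce_v6_0_get_num_crtc so that early_init and disable_dce compute it the same way instead of duplicating the switch. A standalone sketch of the shape of that helper (chip names abbreviated; stand-in enum, not the driver's):

/* Sketch only: stand-in chip enum. */
enum chip { TAHITI, PITCAIRN, VERDE, OLAND, OTHER };

static int get_num_crtc(enum chip c)
{
	switch (c) {
	case TAHITI:
	case PITCAIRN:
	case VERDE:
		return 6;
	case OLAND:
		return 2;
	default:
		return 0;
	}
}

Centralizing the count matters for disable_dce, which must walk exactly the CRTCs that exist on the ASIC when shutting down a firmware-lit display.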
@@ -24,6 +24,9 @@
#ifndef __DCE_V6_0_H__
#define __DCE_V6_0_H__

extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
extern const struct amdgpu_ip_block_version dce_v6_0_ip_block;
extern const struct amdgpu_ip_block_version dce_v6_4_ip_block;

void dce_v6_0_disable_dce(struct amdgpu_device *adev);

#endif

@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "dce_v8_0.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] =
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const uint32_t hpd_int_control_offsets[6] = {
	mmDC_HPD1_INT_CONTROL,
	mmDC_HPD2_INT_CONTROL,
	mmDC_HPD3_INT_CONTROL,
	mmDC_HPD4_INT_CONTROL,
	mmDC_HPD5_INT_CONTROL,
	mmDC_HPD6_INT_CONTROL,
};

static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;

			switch (amdgpu_connector->hpd.hpd) {
			case AMDGPU_HPD_1:
				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
				break;
			case AMDGPU_HPD_2:
				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
				break;
			case AMDGPU_HPD_3:
				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
				break;
			case AMDGPU_HPD_4:
				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
				break;
			case AMDGPU_HPD_5:
				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
				break;
			case AMDGPU_HPD_6:
				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
				break;
			default:
				continue;
			}

			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}
@@ -3204,42 +3084,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
	u32 dc_hpd_int_cntl;

	switch (type) {
	case AMDGPU_HPD_1:
		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
		break;
	case AMDGPU_HPD_2:
		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
		break;
	case AMDGPU_HPD_3:
		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
		break;
	case AMDGPU_HPD_4:
		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
		break;
	case AMDGPU_HPD_5:
		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
		break;
	case AMDGPU_HPD_6:
		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
		break;
	default:
	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hdp %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	default:
		break;
@@ -3412,7 +3273,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, int_control, tmp;
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3423,12 +3284,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;
	int_control = hpd_int_control_offsets[hpd];

	if (disp_int & mask) {
		tmp = RREG32(int_control);
		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(int_control, tmp);
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}
@@ -3449,7 +3309,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs dce_v8_0_ip_funcs = {
static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
	.name = "dce_v8_0",
	.early_init = dce_v8_0_early_init,
	.late_init = NULL,
@@ -3779,3 +3639,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 2,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 3,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

@@ -24,7 +24,11 @@
#ifndef __DCE_V8_0_H__
#define __DCE_V8_0_H__

extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
extern const struct amdgpu_ip_block_version dce_v8_0_ip_block;
extern const struct amdgpu_ip_block_version dce_v8_1_ip_block;
extern const struct amdgpu_ip_block_version dce_v8_2_ip_block;
extern const struct amdgpu_ip_block_version dce_v8_3_ip_block;
extern const struct amdgpu_ip_block_version dce_v8_5_ip_block;

void dce_v8_0_disable_dce(struct amdgpu_device *adev);

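The core of the dce_v8_0 rework above is mechanical: every per-pin switch over mmDC_HPDn_* registers collapses into a bounds check plus one lookup in the new hpd_offsets[] table, indexing off the pin-0 register. A standalone sketch of that pattern follows; the register address, mask, offsets and the fake MMIO array are illustrative stand-ins for RREG32() and the real HPDn_REGISTER_OFFSET values, not taken from the hardware headers.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DC_HPD1_INT_STATUS 0x1807u            /* hypothetical pin-0 register */
#define HPD_SENSE_MASK     (1u << 1)

static const uint32_t hpd_offsets[] = { 0x0, 0xc, 0x18, 0x24, 0x30, 0x3c };

/* Fake MMIO space standing in for RREG32(). */
static uint32_t mmio[0x2000];
static uint32_t reg_read(uint32_t reg) { return mmio[reg]; }

static bool hpd_sense(unsigned hpd, unsigned num_hpd)
{
	if (hpd >= num_hpd)       /* bounds check replaces the default: case */
		return false;
	return reg_read(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & HPD_SENSE_MASK;
}

int main(void)
{
	mmio[DC_HPD1_INT_STATUS + hpd_offsets[2]] = HPD_SENSE_MASK;
	printf("hpd2 connected: %d\n", hpd_sense(2, 6));   /* prints 1 */
	printf("hpd5 connected: %d\n", hpd_sense(5, 6));   /* prints 0 */
	return 0;
}

The table lookup also makes the num_hpd bound meaningful: ASICs with fewer pins simply pass a smaller limit instead of needing their own switch arms.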
@@ -27,6 +27,9 @@
#include "atom.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "dce_v6_0.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
@@ -34,11 +37,13 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"

#define DCE_VIRTUAL_VBLANK_PERIOD 16666666


static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry);
static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
					      int index);

/**
 * dce_virtual_vblank_wait - vblank wait asic callback.
@@ -99,6 +104,14 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
				       struct amdgpu_mode_mc_save *save)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
@@ -119,6 +132,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
		dce_v11_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE */
		return;
	default:
@@ -195,10 +211,9 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		/* Make sure VBLANK interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
		break;
	case DRM_MODE_DPMS_STANDBY:
@@ -264,24 +279,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}

	return true;
}

@@ -341,6 +338,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);

	return 0;
@@ -350,48 +348,128 @@ static int dce_virtual_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
	dce_virtual_set_display_funcs(adev);
	dce_virtual_set_irq_funcs(adev);

	adev->mode_info.num_crtc = 1;
	adev->mode_info.num_hpd = 1;
	adev->mode_info.num_dig = 1;
	return 0;
}

static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
static struct drm_encoder *
dce_virtual_encoder(struct drm_connector *connector)
{
	struct amdgpu_i2c_bus_rec ddc_bus;
	struct amdgpu_router router;
	struct amdgpu_hpd hpd;
	int enc_id = connector->encoder_ids[0];
	struct drm_encoder *encoder;
	int i;

	/* look up gpio for ddc, hpd */
	ddc_bus.valid = false;
	hpd.hpd = AMDGPU_HPD_NONE;
	/* needed for aux chan transactions */
	ddc_bus.hpd = hpd.hpd;
	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0)
			break;

	memset(&router, 0, sizeof(router));
	router.ddc_valid = false;
	router.cd_valid = false;
	amdgpu_display_add_connector(adev,
				     0,
				     ATOM_DEVICE_CRT1_SUPPORT,
				     DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
				     CONNECTOR_OBJECT_ID_VIRTUAL,
				     &hpd,
				     &router);
		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
		if (!encoder)
			continue;

	amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
				   ATOM_DEVICE_CRT1_SUPPORT,
				   0);

	amdgpu_link_encoder_connector(adev->ddev);

	return true;
		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
			return encoder;
	}

	/* pick the first one */
	if (enc_id)
		return drm_encoder_find(connector->dev, enc_id);
	return NULL;
}

static int dce_virtual_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode = NULL;
	unsigned i;
	static const struct mode_size {
		int w;
		int h;
	} common_modes[17] = {
		{ 640,  480},
		{ 720,  480},
		{ 800,  600},
		{ 848,  480},
		{1024,  768},
		{1152,  768},
		{1280,  720},
		{1280,  800},
		{1280,  854},
		{1280,  960},
		{1280, 1024},
		{1440,  900},
		{1400, 1050},
		{1680, 1050},
		{1600, 1200},
		{1920, 1080},
		{1920, 1200}
	};

	for (i = 0; i < 17; i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
		drm_mode_probed_add(connector, mode);
	}

	return 0;
}

static int dce_virtual_mode_valid(struct drm_connector *connector,
				  struct drm_display_mode *mode)
{
	return MODE_OK;
}

static int
dce_virtual_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

static enum drm_connector_status
dce_virtual_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected;
}

static int
dce_virtual_set_property(struct drm_connector *connector,
			 struct drm_property *property,
			 uint64_t val)
{
	return 0;
}

static void dce_virtual_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

static void dce_virtual_force(struct drm_connector *connector)
{
	return;
}

static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
	.get_modes = dce_virtual_get_modes,
	.mode_valid = dce_virtual_mode_valid,
	.best_encoder = dce_virtual_encoder,
};

static const struct drm_connector_funcs dce_virtual_connector_funcs = {
	.dpms = dce_virtual_dpms,
	.detect = dce_virtual_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = dce_virtual_set_property,
	.destroy = dce_virtual_destroy,
	.force = dce_virtual_force,
};

static int dce_virtual_sw_init(void *handle)
{
	int r, i;
@@ -420,16 +498,16 @@ static int dce_virtual_sw_init(void *handle)
	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	/* allocate crtcs */
	/* allocate crtcs, encoders, connectors */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_virtual_crtc_init(adev, i);
		if (r)
			return r;
		r = dce_virtual_connector_encoder_init(adev, i);
		if (r)
			return r;
	}

	dce_virtual_get_connector_info(adev);
	amdgpu_print_display_setup(adev->ddev);

	drm_kms_helper_poll_init(adev->ddev);

	adev->mode_info.mode_config_initialized = true;
@@ -496,7 +574,7 @@ static int dce_virtual_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs dce_virtual_ip_funcs = {
static const struct amd_ip_funcs dce_virtual_ip_funcs = {
	.name = "dce_virtual",
	.early_init = dce_virtual_early_init,
	.late_init = NULL,
@@ -547,10 +625,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
					   const struct drm_display_mode *mode,
					   struct drm_display_mode *adjusted_mode)
{

	/* set the active encoder to connector routing */
	amdgpu_encoder_set_active_device(encoder);

	return true;
}

@@ -576,45 +650,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
	.destroy = dce_virtual_encoder_destroy,
};

static void dce_virtual_encoder_add(struct amdgpu_device *adev,
				    uint32_t encoder_enum,
				    uint32_t supported_device,
				    u16 caps)
static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
					      int index)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}

	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	encoder->possible_crtcs = 0x1;
	amdgpu_encoder->enc_priv = NULL;
	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
	/* add a new encoder */
	encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
	if (!encoder)
		return -ENOMEM;
	encoder->possible_crtcs = 1 << index;
	drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL, NULL);
	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
	DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);

	connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
	if (!connector) {
		kfree(encoder);
		return -ENOMEM;
	}

	/* add a new connector */
	drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;
	drm_connector_register(connector);

	/* link them */
	drm_mode_connector_attach_encoder(connector, encoder);

	return 0;
}

static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
@@ -630,8 +699,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
	.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
	.page_flip = &dce_virtual_page_flip,
	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
	.add_encoder = &dce_virtual_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.add_encoder = NULL,
	.add_connector = NULL,
	.stop_mc_access = &dce_virtual_stop_mc_access,
	.resume_mc_access = &dce_virtual_resume_mc_access,
};
@@ -642,107 +711,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
	adev->mode_info.funcs = &dce_virtual_display_funcs;
}

static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
{
	struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
	struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
	unsigned crtc = 0;
	drm_handle_vblank(adev->ddev, crtc);
	dce_virtual_pageflip_irq(adev, NULL, NULL);
	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}

static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
							int crtc,
							enum amdgpu_interrupt_state state)
{
	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	if (state && !adev->mode_info.vsync_timer_enabled) {
		DRM_DEBUG("Enable software vsync timer\n");
		hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
		adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
		hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
	} else if (!state && adev->mode_info.vsync_timer_enabled) {
		DRM_DEBUG("Disable software vsync timer\n");
		hrtimer_cancel(&adev->mode_info.vblank_timer);
	}

	adev->mode_info.vsync_timer_enabled = state;
	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
}


static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned type,
					  enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	default:
		break;
	}
	return 0;
}

static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
					    int crtc)
{
	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}
}

static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	unsigned crtc = 0;
	unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;

	dce_virtual_crtc_vblank_int_ack(adev, crtc);

	if (amdgpu_irq_enabled(adev, source, irq_type)) {
		drm_handle_vblank(adev->ddev, crtc);
	}
	dce_virtual_pageflip_irq(adev, NULL, NULL);
	DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
	return 0;
}

static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}
	DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);

	return 0;
}

static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
static int dce_virtual_pageflip(struct amdgpu_device *adev,
				unsigned crtc_id)
{
	unsigned long flags;
	unsigned crtc_id = 0;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = 0;
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (crtc_id >= adev->mode_info.num_crtc) {
@@ -781,22 +756,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
	return 0;
}

static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
						       struct amdgpu_crtc, vblank_timer);
	struct drm_device *ddev = amdgpu_crtc->base.dev;
	struct amdgpu_device *adev = ddev->dev_private;

	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
		      HRTIMER_MODE_REL);

	return HRTIMER_NORESTART;
}

static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
							int crtc,
							enum amdgpu_interrupt_state state)
{
	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
		DRM_DEBUG("Enable software vsync timer\n");
		hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
				    ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
		adev->mode_info.crtcs[crtc]->vblank_timer.function =
			dce_virtual_vblank_timer_handle;
		hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
			      ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
	} else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
		DRM_DEBUG("Disable software vsync timer\n");
		hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
	}

	adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
}


static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned type,
					  enum amdgpu_interrupt_state state)
{
	if (type > AMDGPU_CRTC_IRQ_VBLANK6)
		return -EINVAL;

	dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);

	return 0;
}

static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
	.set = dce_virtual_set_crtc_irq_state,
	.process = dce_virtual_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
	.set = dce_virtual_set_pageflip_irq_state,
	.process = dce_virtual_pageflip_irq,
	.process = NULL,
};

static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
}

const struct amdgpu_ip_block_version dce_virtual_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_virtual_ip_funcs,
};

@@ -24,8 +24,7 @@
#ifndef __DCE_VIRTUAL_H__
#define __DCE_VIRTUAL_H__

extern const struct amd_ip_funcs dce_virtual_ip_funcs;
#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
extern const struct amdgpu_ip_block_version dce_virtual_ip_block;

#endif

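Since the virtual display has no hardware to raise vblank interrupts, the dce_virtual hunks above emulate them with a per-CRTC hrtimer that fires every DCE_VIRTUAL_VBLANK_PERIOD (16.666 ms, roughly 60 Hz), delivers the vblank and pageflip events, and re-arms itself. A minimal userspace sketch of that scheme follows, with a POSIX thread standing in for the kernel hrtimer API (build with -pthread); the struct and names are illustrative, not the kernel's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define VBLANK_PERIOD_NS 16666666L   /* same 16.666 ms period as the patch */

/* The timer lives in the per-CRTC struct, mirroring the patch moving it
 * from amdgpu_mode_info into amdgpu_crtc so each CRTC ticks on its own. */
struct virtual_crtc {
	int id;
	atomic_bool enabled;
	pthread_t timer;
};

static void handle_vblank(struct virtual_crtc *crtc)
{
	/* stands in for drm_handle_vblank() plus pageflip completion */
	printf("crtc %d: vblank\n", crtc->id);
}

static void *vblank_thread(void *arg)
{
	struct virtual_crtc *crtc = arg;
	struct timespec period = { .tv_nsec = VBLANK_PERIOD_NS };

	while (atomic_load(&crtc->enabled)) {
		nanosleep(&period, NULL);   /* re-arm, like the hrtimer restart */
		handle_vblank(crtc);
	}
	return NULL;
}

int main(void)
{
	struct virtual_crtc crtc = { .id = 0 };

	atomic_store(&crtc.enabled, true);
	pthread_create(&crtc.timer, NULL, vblank_thread, &crtc);

	struct timespec run = { .tv_nsec = 100000000L };  /* ~6 vblanks */
	nanosleep(&run, NULL);

	atomic_store(&crtc.enabled, false);   /* like hrtimer_cancel() */
	pthread_join(crtc.timer, NULL);
	return 0;
}
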
@@ -1940,7 +1940,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)

static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

@@ -1966,7 +1966,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	/* write new base address */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		6; /* gfx_v6_0_ring_emit_ib */
}

static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
{
	return
		5 + /* gfx_v6_0_ring_emit_hdp_flush */
		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
		3; /* gfx_v6_ring_emit_cntxcntl */
}

static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
{
	return
		5 + /* gfx_v6_0_ring_emit_hdp_flush */
		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
		17 + /* gfx_v6_0_ring_emit_vm_flush */
		14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
}

static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v6_0_select_se_sh,
@@ -2896,9 +2869,7 @@ static int gfx_v6_0_sw_init(void *handle)
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		r = amdgpu_ring_init(adev, ring, 1024,
				     0x80000000, 0xf,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
				     AMDGPU_RING_TYPE_GFX);
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}
@@ -2920,9 +2891,7 @@ static int gfx_v6_0_sw_init(void *handle)
		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
		r = amdgpu_ring_init(adev, ring, 1024,
				     0x80000000, 0xf,
				     &adev->gfx.eop_irq, irq_type,
				     AMDGPU_RING_TYPE_COMPUTE);
				     &adev->gfx.eop_irq, irq_type);
		if (r)
			return r;
	}
@@ -3237,7 +3206,7 @@ static int gfx_v6_0_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
	.name = "gfx_v6_0",
	.early_init = gfx_v6_0_early_init,
	.late_init = NULL,
@@ -3255,10 +3224,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
};

static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = 0x80000000,
	.get_rptr = gfx_v6_0_ring_get_rptr,
	.get_wptr = gfx_v6_0_ring_get_wptr,
	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
	.parse_cs = NULL,
	.emit_frame_size =
		5 + /* gfx_v6_0_ring_emit_hdp_flush */
		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
		3, /* gfx_v6_ring_emit_cntxcntl */
	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
	.emit_ib = gfx_v6_0_ring_emit_ib,
	.emit_fence = gfx_v6_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3269,15 +3248,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
	.test_ib = gfx_v6_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
};

static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = 0x80000000,
	.get_rptr = gfx_v6_0_ring_get_rptr,
	.get_wptr = gfx_v6_0_ring_get_wptr,
	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
	.parse_cs = NULL,
	.emit_frame_size =
		5 + /* gfx_v6_0_ring_emit_hdp_flush */
		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
		17 + /* gfx_v6_0_ring_emit_vm_flush */
		14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
	.emit_ib = gfx_v6_0_ring_emit_ib,
	.emit_fence = gfx_v6_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3287,8 +3273,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
	.test_ring = gfx_v6_0_ring_test_ring,
	.test_ib = gfx_v6_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
};

static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -3360,3 +3344,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
}

const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v6_0_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __GFX_V6_0_H__
#define __GFX_V6_0_H__

extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block;

#endif

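The gfx_v6_0 hunks above fold the per-ring get_emit_ib_size/get_dma_frame_size callbacks into constant emit_ib_size/emit_frame_size fields of amdgpu_ring_funcs, alongside the ring type, alignment mask and nop packet that used to be passed to amdgpu_ring_init(). A cut-down standalone sketch of the resulting data-driven layout follows; the struct here is a simplified stand-in, not the kernel's definition.

#include <stdint.h>
#include <stdio.h>

enum ring_type { RING_TYPE_GFX, RING_TYPE_COMPUTE };

struct ring_funcs {
	enum ring_type type;
	uint32_t align_mask;
	uint32_t nop;             /* filler packet used for padding */
	unsigned emit_frame_size; /* worst-case dwords per frame, precomputed */
	unsigned emit_ib_size;    /* dwords per indirect-buffer packet */
};

static const struct ring_funcs gfx_ring_funcs = {
	.type = RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = 0x80000000,
	/* the same per-emit costs the old callback summed at runtime */
	.emit_frame_size = 5 + 5 + (14 + 14 + 14) + (7 + 4) + (17 + 6) + 3,
	.emit_ib_size = 6,
};

/* Callers can now size ring submissions from plain data. */
static unsigned frame_dwords(const struct ring_funcs *f, unsigned num_ibs)
{
	return f->emit_frame_size + num_ibs * f->emit_ib_size;
}

int main(void)
{
	printf("worst case for 2 IBs: %u dwords\n",
	       frame_dwords(&gfx_ring_funcs, 2));
	return 0;
}

This is also why amdgpu_ring_init() in the hunks above loses its nop, align-mask and ring-type arguments: the information now travels with the funcs table the ring already carries.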
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask;
	int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;

	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
 */
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
	if (adev->gfx.rlc.save_restore_obj == NULL) {
		r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL,
				     &adev->gfx.rlc.save_restore_obj);
		if (r) {
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
	if (adev->gfx.rlc.clear_state_obj == NULL) {
		r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL,
				     &adev->gfx.rlc.clear_state_obj);
		if (r) {
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
	if (adev->gfx.rlc.cp_table_obj == NULL) {
		r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL,
				     &adev->gfx.rlc.cp_table_obj);
		if (r) {
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	return
		4; /* gfx_v7_0_ring_emit_ib_gfx */
	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
	return RREG32(mmSQ_IND_DATA);
}

static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	return
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
		3; /* gfx_v7_ring_emit_cntxcntl */
}

static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
{
	return
		4; /* gfx_v7_0_ring_emit_ib_compute */
}

static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
{
	return
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
		17 + /* gfx_v7_0_ring_emit_vm_flush */
		7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
	/* type 0 wave data */
	dst[(*no_fields)++] = 0;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v7_0_select_se_sh,
	.read_wave_data = &gfx_v7_0_read_wave_data,
};

static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle)
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		r = amdgpu_ring_init(adev, ring, 1024,
				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
				     AMDGPU_RING_TYPE_GFX);
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle)
		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
		/* type-2 packets are deprecated on MEC, use type-3 instead */
		r = amdgpu_ring_init(adev, ring, 1024,
				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
				     &adev->gfx.eop_irq, irq_type,
				     AMDGPU_RING_TYPE_COMPUTE);
				     &adev->gfx.eop_irq, irq_type);
		if (r)
			return r;
	}
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
	return 0;
}

const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
	.name = "gfx_v7_0",
	.early_init = gfx_v7_0_early_init,
	.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.get_rptr = gfx_v7_0_ring_get_rptr,
	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
	.parse_cs = NULL,
	.emit_frame_size =
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
		3, /* gfx_v7_ring_emit_cntxcntl */
	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.get_rptr = gfx_v7_0_ring_get_rptr,
	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
	.parse_cs = NULL,
	.emit_frame_size =
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
		17 + /* gfx_v7_0_ring_emit_vm_flush */
		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
}

const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 2,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 3,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

@@ -24,6 +24,9 @@
#ifndef __GFX_V7_0_H__
#define __GFX_V7_0_H__

extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block;
extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;

#endif

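The new wave_read_ind()/read_wave_data helpers in the gfx_v7_0 hunks above (and the identical gfx_v8_0 copies below) use the classic indexed-register idiom: one write packs the wave, SIMD and address selectors into SQ_IND_INDEX, then SQ_IND_DATA returns the selected value. A standalone sketch of that idiom follows; the two-entry fake MMIO array and its decode are illustrative stand-ins, only the bit packing mirrors the patch.

#include <stdint.h>
#include <stdio.h>

#define SQ_IND_INDEX 0
#define SQ_IND_DATA  1

static uint32_t mmio[2];

static void reg_write(uint32_t reg, uint32_t val) { mmio[reg] = val; }

static uint32_t reg_read(uint32_t reg)
{
	if (reg == SQ_IND_DATA)
		/* real hardware decodes mmio[SQ_IND_INDEX] here; echo it back */
		return mmio[SQ_IND_INDEX];
	return mmio[reg];
}

/* Mirrors wave_read_ind(): pack wave/simd/address into the index register,
 * then read the data register back. */
static uint32_t wave_read_ind(uint32_t simd, uint32_t wave, uint32_t address)
{
	reg_write(SQ_IND_INDEX,
		  (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
	return reg_read(SQ_IND_DATA);
}

int main(void)
{
	printf("0x%08x\n", wave_read_ind(1, 2, 0x12));
	return 0;
}
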
@@ -1058,6 +1058,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		/* we need account JT in */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);

		if (amdgpu_sriov_vf(adev)) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
			info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
			info->fw = adev->gfx.mec_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
		}

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
@@ -1127,34 +1140,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
			PACKET3_SET_CONTEXT_REG_START);
	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_POLARIS10:
		buffer[count++] = cpu_to_le32(0x16000012);
		buffer[count++] = cpu_to_le32(0x0000002A);
		break;
	case CHIP_POLARIS11:
		buffer[count++] = cpu_to_le32(0x16000012);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_FIJI:
		buffer[count++] = cpu_to_le32(0x3a00161a);
		buffer[count++] = cpu_to_le32(0x0000002e);
		break;
	case CHIP_TOPAZ:
	case CHIP_CARRIZO:
		buffer[count++] = cpu_to_le32(0x00000002);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_STONEY:
		buffer[count++] = cpu_to_le32(0x00000000);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	default:
		buffer[count++] = cpu_to_le32(0x00000000);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -1273,7 +1260,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
	if (adev->gfx.rlc.clear_state_obj == NULL) {
		r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL,
				     &adev->gfx.rlc.clear_state_obj);
		if (r) {
@@ -1315,7 +1303,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
	if (adev->gfx.rlc.cp_table_obj == NULL) {
		r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL,
				     &adev->gfx.rlc.cp_table_obj);
		if (r) {
@@ -2045,10 +2034,8 @@ static int gfx_v8_0_sw_init(void *handle)
			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
		}

		r = amdgpu_ring_init(adev, ring, 1024,
				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
				     AMDGPU_RING_TYPE_GFX);
		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
				     AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}
@@ -2072,10 +2059,8 @@ static int gfx_v8_0_sw_init(void *handle)
		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
		/* type-2 packets are deprecated on MEC, use type-3 instead */
		r = amdgpu_ring_init(adev, ring, 1024,
				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
				     &adev->gfx.eop_irq, irq_type,
				     AMDGPU_RING_TYPE_COMPUTE);
		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
				     irq_type);
		if (r)
			return r;
	}
@@ -3679,6 +3664,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
							num_rb_pipes);
	}

	/* cache the values for userspace */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
			adev->gfx.config.rb_config[i][j].rb_backend_disable =
				RREG32(mmCC_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].raster_config =
				RREG32(mmPA_SC_RASTER_CONFIG);
			adev->gfx.config.rb_config[i][j].raster_config_1 =
				RREG32(mmPA_SC_RASTER_CONFIG_1);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}

@@ -4331,7 +4331,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;
	int r;

	/* Set the write pointer delay */
@@ -4362,6 +4362,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
	mdelay(1);
	WREG32(mmCP_RB0_CNTL, tmp);

@@ -5438,9 +5441,41 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
	return RREG32(mmSQ_IND_DATA);
}

static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 0 wave data */
	dst[(*no_fields)++] = 0;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}


static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v8_0_select_se_sh,
	.read_wave_data = &gfx_v8_0_read_wave_data,
};

static int gfx_v8_0_early_init(void *handle)
@@ -6120,7 +6155,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask, reg_mem_engine;

	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6222,7 +6257,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,

static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

@@ -6240,11 +6275,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);

	/* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
	if (usepfp)
		amdgpu_ring_insert_nop(ring, 128);
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6360,42 +6391,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
{
	return
		4; /* gfx_v8_0_ring_emit_ib_gfx */
|
||||
}
|
||||
|
||||
static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
|
||||
{
|
||||
return
|
||||
20 + /* gfx_v8_0_ring_emit_gds_switch */
|
||||
7 + /* gfx_v8_0_ring_emit_hdp_flush */
|
||||
5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
|
||||
6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
|
||||
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
|
||||
256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
|
||||
2 + /* gfx_v8_ring_emit_sb */
|
||||
3; /* gfx_v8_ring_emit_cntxcntl */
|
||||
}
|
||||
|
||||
static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
|
||||
{
|
||||
return
|
||||
4; /* gfx_v8_0_ring_emit_ib_compute */
|
||||
}
|
||||
|
||||
static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
|
||||
{
|
||||
return
|
||||
20 + /* gfx_v8_0_ring_emit_gds_switch */
|
||||
7 + /* gfx_v8_0_ring_emit_hdp_flush */
|
||||
5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
|
||||
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
|
||||
17 + /* gfx_v8_0_ring_emit_vm_flush */
|
||||
7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
|
||||
}
|
||||
|
||||
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
|
@ -6541,7 +6536,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
|
||||
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
|
||||
.name = "gfx_v8_0",
|
||||
.early_init = gfx_v8_0_early_init,
|
||||
.late_init = gfx_v8_0_late_init,
|
||||
|
@ -6562,10 +6557,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
|
|||
};
|
||||
|
||||
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
|
||||
.type = AMDGPU_RING_TYPE_GFX,
|
||||
.align_mask = 0xff,
|
||||
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
|
||||
.get_rptr = gfx_v8_0_ring_get_rptr,
|
||||
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
|
||||
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
|
||||
.parse_cs = NULL,
|
||||
.emit_frame_size =
|
||||
20 + /* gfx_v8_0_ring_emit_gds_switch */
|
||||
7 + /* gfx_v8_0_ring_emit_hdp_flush */
|
||||
5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
|
||||
6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
|
||||
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
|
||||
128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
|
||||
2 + /* gfx_v8_ring_emit_sb */
|
||||
3, /* gfx_v8_ring_emit_cntxcntl */
|
||||
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
|
||||
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
|
||||
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
|
||||
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
|
||||
|
@ -6579,15 +6586,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
|
|||
.pad_ib = amdgpu_ring_generic_pad_ib,
|
||||
.emit_switch_buffer = gfx_v8_ring_emit_sb,
|
||||
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
|
||||
.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
|
||||
.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
|
||||
};
|
||||
|
||||
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
|
||||
.type = AMDGPU_RING_TYPE_COMPUTE,
|
||||
.align_mask = 0xff,
|
||||
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
|
||||
.get_rptr = gfx_v8_0_ring_get_rptr,
|
||||
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
|
||||
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
|
||||
.parse_cs = NULL,
|
||||
.emit_frame_size =
|
||||
20 + /* gfx_v8_0_ring_emit_gds_switch */
|
||||
7 + /* gfx_v8_0_ring_emit_hdp_flush */
|
||||
5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
|
||||
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
|
||||
17 + /* gfx_v8_0_ring_emit_vm_flush */
|
||||
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
|
||||
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
|
||||
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
|
||||
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
|
||||
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
|
||||
|
@ -6599,8 +6614,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
|
|||
.test_ib = gfx_v8_0_ring_test_ib,
|
||||
.insert_nop = amdgpu_ring_insert_nop,
|
||||
.pad_ib = amdgpu_ring_generic_pad_ib,
|
||||
.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
|
||||
.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
|
||||
};
|
||||
|
||||
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
|
||||
|
@ -6753,3 +6766,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
|
|||
cu_info->number = active_cu_number;
|
||||
cu_info->ao_cu_mask = ao_cu_mask;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_GFX,
|
||||
.major = 8,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &gfx_v8_0_ip_funcs,
|
||||
};
|
||||
|
||||
const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_GFX,
|
||||
.major = 8,
|
||||
.minor = 1,
|
||||
.rev = 0,
|
||||
.funcs = &gfx_v8_0_ip_funcs,
|
||||
};
|
||||
|
|
|
@@ -24,6 +24,7 @@
#ifndef __GFX_V8_0_H__
#define __GFX_V8_0_H__

extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;

#endif

@@ -1030,7 +1030,7 @@ static int gmc_v6_0_set_powergating_state(void *handle,
return 0;
}

const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.name = "gmc_v6_0",
.early_init = gmc_v6_0_early_init,
.late_init = gmc_v6_0_late_init,
@@ -1069,3 +1069,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &gmc_v6_0_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __GMC_V6_0_H__
#define __GMC_V6_0_H__

extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block;

#endif

@@ -1235,7 +1235,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
return 0;
}

const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
@@ -1273,3 +1273,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 7,
.minor = 4,
.rev = 0,
.funcs = &gmc_v7_0_ip_funcs,
};

@@ -24,6 +24,7 @@
#ifndef __GMC_V7_0_H__
#define __GMC_V7_0_H__

extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block;
extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block;

#endif

@@ -1436,7 +1436,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
return 0;
}

const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
@@ -1477,3 +1477,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 8,
.minor = 0,
.rev = 0,
.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 8,
.minor = 1,
.rev = 0,
.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 8,
.minor = 5,
.rev = 0,
.funcs = &gmc_v8_0_ip_funcs,
};

@@ -24,6 +24,8 @@
#ifndef __GMC_V8_0_H__
#define __GMC_V8_0_H__

extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block;
extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block;
extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block;

#endif

@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle,
return 0;
}

const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
.late_init = NULL,
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &iceland_ih_funcs;
}

const struct amdgpu_ip_block_version iceland_ih_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 2,
.minor = 4,
.rev = 0,
.funcs = &iceland_ih_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __ICELAND_IH_H__
#define __ICELAND_IH_H__

extern const struct amd_ip_funcs iceland_ih_ip_funcs;
extern const struct amdgpu_ip_block_version iceland_ih_ip_block;

#endif /* __ICELAND_IH_H__ */

@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;

/* fill in the vce power states */
for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -3243,6 +3243,18 @@ static int kv_dpm_set_powergating_state(void *handle,
return 0;
}

static int kv_check_state_equal(struct amdgpu_device *adev,
struct amdgpu_ps *cps,
struct amdgpu_ps *rps,
bool *equal)
{
if (equal == NULL)
return -EINVAL;

*equal = false;
return 0;
}

const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3273,6 +3285,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.check_state_equal = kv_check_state_equal,
};

static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -3291,3 +3305,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}

const struct amdgpu_ip_block_version kv_dpm_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &kv_dpm_ip_funcs,
};

@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)

for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->nop |
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->nop);
amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
7 + 6; /* sdma_v2_4_ring_emit_ib */
}

static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
6 + /* sdma_v2_4_ring_emit_hdp_flush */
3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
6 + /* sdma_v2_4_ring_emit_pipeline_sync */
12 + /* sdma_v2_4_ring_emit_vm_flush */
10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
}

static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
AMDGPU_RING_TYPE_SDMA);
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
return 0;
}

const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.late_init = NULL,
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
};

static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
.parse_cs = NULL,
.emit_frame_size =
6 + /* sdma_v2_4_ring_emit_hdp_flush */
3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
6 + /* sdma_v2_4_ring_emit_pipeline_sync */
12 + /* sdma_v2_4_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
.get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
.get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}

const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 2,
.minor = 4,
.rev = 0,
.funcs = &sdma_v2_4_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __SDMA_V2_4_H__
#define __SDMA_V2_4_H__

extern const struct amd_ip_funcs sdma_v2_4_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block;

#endif

@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)

for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->nop |
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->nop);
amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
7 + 6; /* sdma_v3_0_ring_emit_ib */
}

static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
6 + /* sdma_v3_0_ring_emit_hdp_flush */
3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
6 + /* sdma_v3_0_ring_emit_pipeline_sync */
12 + /* sdma_v3_0_ring_emit_vm_flush */
10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
}

static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle)

sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
AMDGPU_RING_TYPE_SDMA);
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
return 0;
}

const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
.late_init = NULL,
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
.parse_cs = NULL,
.emit_frame_size =
6 + /* sdma_v3_0_ring_emit_hdp_flush */
3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
6 + /* sdma_v3_0_ring_emit_pipeline_sync */
12 + /* sdma_v3_0_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
.get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
.get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}

const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 3,
.minor = 0,
.rev = 0,
.funcs = &sdma_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 3,
.minor = 1,
.rev = 0,
.funcs = &sdma_v3_0_ip_funcs,
};

@@ -24,6 +24,7 @@
#ifndef __SDMA_V3_0_H__
#define __SDMA_V3_0_H__

extern const struct amd_ip_funcs sdma_v3_0_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block;
extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block;

#endif

@@ -39,6 +39,7 @@
#include "si_dma.h"
#include "dce_v6_0.h"
#include "si.h"
#include "dce_virtual.h"

static const u32 tahiti_golden_registers[] =
{
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
return r;
}

void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;

@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle,
return 0;
}

const struct amd_ip_funcs si_common_ip_funcs = {
static const struct amd_ip_funcs si_common_ip_funcs = {
.name = "si_common",
.early_init = si_common_early_init,
.late_init = NULL,
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = {
.set_powergating_state = si_common_set_powergating_state,
};

static const struct amdgpu_ip_block_version verde_ip_blocks[] =
{
static const struct amdgpu_ip_block_version si_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &si_common_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &gmc_v6_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &si_ih_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &amdgpu_pp_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &dce_v6_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &gfx_v6_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &si_dma_ip_funcs,
},
/* {
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 3,
.minor = 1,
.rev = 0,
.funcs = &si_null_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &si_null_ip_funcs,
},
*/
};

static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
{
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &si_common_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &gmc_v6_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &si_ih_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &amdgpu_pp_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &gfx_v6_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &si_dma_ip_funcs,
},
};

int si_set_ip_blocks(struct amdgpu_device *adev)
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
amdgpu_ip_block_add(adev, &si_common_ip_block);
amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_ip_block_add(adev, &si_ih_ip_block);
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
else
amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
amdgpu_ip_block_add(adev, &si_dma_ip_block);
/* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
/* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_OLAND:
adev->ip_blocks = verde_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
amdgpu_ip_block_add(adev, &si_common_ip_block);
amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_ip_block_add(adev, &si_ih_ip_block);
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
else
amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
amdgpu_ip_block_add(adev, &si_dma_ip_block);
/* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
/* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
adev->ip_blocks = hainan_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
amdgpu_ip_block_add(adev, &si_common_ip_block);
amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_ip_block_add(adev, &si_ih_ip_block);
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
amdgpu_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();

@@ -24,8 +24,6 @@
#ifndef __SI_H__
#define __SI_H__

extern const struct amd_ip_funcs si_common_ip_funcs;

void si_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int si_set_ip_blocks(struct amdgpu_device *adev);

@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
7 + 3; /* si_dma_ring_emit_ib */
}

static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
3 + /* si_dma_ring_emit_hdp_flush */
3 + /* si_dma_ring_emit_hdp_invalidate */
6 + /* si_dma_ring_emit_pipeline_sync */
12 + /* si_dma_ring_emit_vm_flush */
9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
}

static int si_dma_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
AMDGPU_RING_TYPE_SDMA);
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle,
return 0;
}

const struct amd_ip_funcs si_dma_ip_funcs = {
static const struct amd_ip_funcs si_dma_ip_funcs = {
.name = "si_dma",
.early_init = si_dma_early_init,
.late_init = NULL,
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
.get_rptr = si_dma_ring_get_rptr,
.get_wptr = si_dma_ring_get_wptr,
.set_wptr = si_dma_ring_set_wptr,
.parse_cs = NULL,
.emit_frame_size =
3 + /* si_dma_ring_emit_hdp_flush */
3 + /* si_dma_ring_emit_hdp_invalidate */
6 + /* si_dma_ring_emit_pipeline_sync */
12 + /* si_dma_ring_emit_vm_flush */
9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
.emit_ib = si_dma_ring_emit_ib,
.emit_fence = si_dma_ring_emit_fence,
.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
.test_ib = si_dma_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = si_dma_ring_pad_ib,
.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &si_dma_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __SI_DMA_H__
#define __SI_DMA_H__

extern const struct amd_ip_funcs si_dma_ip_funcs;
extern const struct amdgpu_ip_block_version si_dma_ip_block;

#endif

@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
adev->pm.dpm.current_ps = &eg_pi->current_rps;
}

static void ni_update_requested_ps(struct amdgpu_device *adev,
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev,
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
}

static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
@@ -7320,7 +7322,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;

/* fill in the vce power states */
for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -7957,6 +7959,57 @@ static int si_dpm_early_init(void *handle)
return 0;
}

static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
const struct rv7xx_pl *si_cpl2)
{
return ((si_cpl1->mclk == si_cpl2->mclk) &&
(si_cpl1->sclk == si_cpl2->sclk) &&
(si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
(si_cpl1->vddc == si_cpl2->vddc) &&
(si_cpl1->vddci == si_cpl2->vddci));
}

static int si_check_state_equal(struct amdgpu_device *adev,
struct amdgpu_ps *cps,
struct amdgpu_ps *rps,
bool *equal)
{
struct si_ps *si_cps;
struct si_ps *si_rps;
int i;

if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;

si_cps = si_get_ps(cps);
si_rps = si_get_ps(rps);

if (si_cps == NULL) {
printk("si_cps is NULL\n");
*equal = false;
return 0;
}

if (si_cps->performance_level_count != si_rps->performance_level_count) {
*equal = false;
return 0;
}

for (i = 0; i < si_cps->performance_level_count; i++) {
if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
&(si_rps->performance_levels[i]))) {
*equal = false;
return 0;
}
}

/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

return 0;
}

const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
.early_init = si_dpm_early_init,
@@ -7991,6 +8044,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
.check_state_equal = &si_check_state_equal,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
};

static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -8010,3 +8065,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}

const struct amdgpu_ip_block_version si_dpm_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &si_dpm_ip_funcs,
};

return 0;
|
||||
}
|
||||
|
||||
const struct amd_ip_funcs si_ih_ip_funcs = {
|
||||
static const struct amd_ip_funcs si_ih_ip_funcs = {
|
||||
.name = "si_ih",
|
||||
.early_init = si_ih_early_init,
|
||||
.late_init = NULL,
|
||||
|
@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
|
|||
adev->irq.ih_funcs = &si_ih_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version si_ih_ip_block =
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_IH,
|
||||
.major = 1,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &si_ih_ip_funcs,
|
||||
};
|
||||
|
|
|
@ -24,6 +24,6 @@
|
|||
#ifndef __SI_IH_H__
|
||||
#define __SI_IH_H__
|
||||
|
||||
extern const struct amd_ip_funcs si_ih_ip_funcs;
|
||||
extern const struct amdgpu_ip_block_version si_ih_ip_block;
|
||||
|
||||
#endif
|
||||
|
|
|
@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle,
|
|||
return 0;
|
||||
}
|
||||
|
||||
const struct amd_ip_funcs tonga_ih_ip_funcs = {
|
||||
static const struct amd_ip_funcs tonga_ih_ip_funcs = {
|
||||
.name = "tonga_ih",
|
||||
.early_init = tonga_ih_early_init,
|
||||
.late_init = NULL,
|
||||
|
@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
|
|||
adev->irq.ih_funcs = &tonga_ih_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version tonga_ih_ip_block =
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_IH,
|
||||
.major = 3,
|
||||
.minor = 0,
|
||||
.rev = 0,
|
||||
.funcs = &tonga_ih_ip_funcs,
|
||||
};
|
||||
|
|
|
@ -24,6 +24,6 @@
|
|||
#ifndef __TONGA_IH_H__
|
||||
#define __TONGA_IH_H__
|
||||
|
||||
extern const struct amd_ip_funcs tonga_ih_ip_funcs;
|
||||
extern const struct amdgpu_ip_block_version tonga_ih_ip_block;
|
||||
|
||||
#endif /* __CZ_IH_H__ */
|
||||
#endif /* __TONGA_IH_H__ */
|
||||
|

@@ -36,6 +36,9 @@
#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -116,8 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)

ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

return r;
}
@@ -526,20 +528,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}

static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
4; /* uvd_v4_2_ring_emit_ib */
}

static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
2 + /* uvd_v4_2_ring_emit_hdp_flush */
2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
}

/**
* uvd_v4_2_mc_resume - memory controller programming
*
@@ -698,18 +686,34 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
return 0;
}

static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

if (enable)
tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
else
tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

if (state == AMD_CG_STATE_GATE)
gate = true;

uvd_v5_0_set_bypass_mode(adev, gate);

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

uvd_v4_2_enable_mgcg(adev, gate);

return 0;
@@ -738,7 +742,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
}

const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
.late_init = NULL,
@@ -756,10 +760,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
2 + /* uvd_v4_2_ring_emit_hdp_flush */
2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +782,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
.get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -789,3 +799,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 4,
.minor = 2,
.rev = 0,
.funcs = &uvd_v4_2_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __UVD_V4_2_H__
#define __UVD_V4_2_H__

extern const struct amd_ip_funcs uvd_v4_2_ip_funcs;
extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block;

#endif

@@ -33,6 +33,8 @@
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -112,8 +114,7 @@ static int uvd_v5_0_sw_init(void *handle)

ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

return r;
}
@@ -577,20 +578,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}

static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
6; /* uvd_v5_0_ring_emit_ib */
}

static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
2 + /* uvd_v5_0_ring_emit_hdp_flush */
2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
}

static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -737,6 +724,20 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif

static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

if (enable)
tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
else
tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -744,6 +745,8 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;

uvd_v5_0_set_bypass_mode(adev, enable);

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

@@ -789,7 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
}
}

const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
.late_init = NULL,
@@ -807,10 +810,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
2 + /* uvd_v5_0_ring_emit_hdp_flush */
2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +832,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -840,3 +849,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 5,
.minor = 0,
.rev = 0,
.funcs = &uvd_v5_0_ip_funcs,
};

@@ -24,6 +24,6 @@
#ifndef __UVD_V5_0_H__
#define __UVD_V5_0_H__

extern const struct amd_ip_funcs uvd_v5_0_ip_funcs;
extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block;

#endif

@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)

ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

return r;
}
@@ -725,31 +724,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}

static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
8; /* uvd_v6_0_ring_emit_ib */
}

static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
2 + /* uvd_v6_0_ring_emit_hdp_flush */
2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
}

static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
{
return
2 + /* uvd_v6_0_ring_emit_hdp_flush */
2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
20 + /* uvd_v6_0_ring_emit_vm_flush */
14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
}

static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -961,7 +935,7 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif

static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

@@ -979,15 +953,14 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

if (adev->asic_type == CHIP_FIJI ||
adev->asic_type == CHIP_POLARIS10)
uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
uvd_v6_0_set_bypass_mode(adev, enable);

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

if (state == AMD_CG_STATE_GATE) {
if (enable) {
/* disable HW gating and enable Sw gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
@@ -1027,7 +1000,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
}
}

const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
.late_init = NULL,
@@ -1048,10 +1021,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
2 + /* uvd_v6_0_ring_emit_hdp_flush */
2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,15 +1044,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = NULL,
.emit_frame_size =
2 + /* uvd_v6_0_ring_emit_hdp_flush */
2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
20 + /* uvd_v6_0_ring_emit_vm_flush */
14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1083,8 +1072,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1108,3 +1095,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 6,
.minor = 2,
.rev = 0,
.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 6,
.minor = 3,
.rev = 0,
.funcs = &uvd_v6_0_ip_funcs,
};

@@ -24,6 +24,8 @@
#ifndef __UVD_V6_0_H__
#define __UVD_V6_0_H__

extern const struct amd_ip_funcs uvd_v6_0_ip_funcs;
extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block;
extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block;
extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block;

#endif