Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few other misc cleanups and bug fixes for 4.6. Highlights:
- unify endian handling in powerplay
- powerplay fixes
- fix a regression in 4.5 on boards with no display connectors
- fence cleanups and locking fixes
- whitespace cleanups and code refactoring in radeon

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux: (35 commits)
  drm/amdgpu/gfx7: add MTYPE definition
  drm/amdgpu: removing BO_VAs shouldn't be interruptible
  drm/amd/powerplay: show uvd/vce power gate enablement for tonga.
  drm/amd/powerplay: show uvd/vce power gate info for fiji
  drm/amdgpu: use sched fence if possible
  drm/amdgpu: move ib.fence to job.fence
  drm/amdgpu: give a fence param to ib_free
  drm/amdgpu: include the right version of gmc header files for iceland
  drm/radeon: fix indentation.
  drm/amd/powerplay: add uvd/vce dpm enabling flag to fix the performance issue for CZ
  drm/amdgpu: switch back to 32bit hw fences v2
  drm/amdgpu: remove amdgpu_fence_is_signaled
  drm/amdgpu: drop the extra fence range check v2
  drm/amdgpu: signal fences directly in amdgpu_fence_process
  drm/amdgpu: cleanup amdgpu_fence_wait_empty v2
  drm/amdgpu: keep all fences in an RCU protected array v2
  drm/amdgpu: add number of hardware submissions to amdgpu_fence_driver_init_ring
  drm/amdgpu: RCU protected amd_sched_fence_release
  drm/amdgpu: RCU protected amdgpu_fence_release
  drm/amdgpu: merge amdgpu_fence_process and _activity
  ...
commit 902d02db1f
@@ -141,7 +141,6 @@ extern unsigned amdgpu_pcie_lane_cap;
 #define CIK_CURSOR_HEIGHT 128
 
 struct amdgpu_device;
-struct amdgpu_fence;
 struct amdgpu_ib;
 struct amdgpu_vm;
 struct amdgpu_ring;
@@ -348,13 +347,15 @@ struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
-	uint64_t			sync_seq;
-	atomic64_t			last_seq;
+	uint32_t			sync_seq;
+	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
-	wait_queue_head_t		fence_queue;
+	unsigned			num_fences_mask;
+	spinlock_t			lock;
+	struct fence			**fences;
 };
 
 /* some special values for the owner field */
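The hunk above replaces the 64-bit sequence bookkeeping with a 32-bit `sync_seq`/`last_seq` pair plus a power-of-two array of outstanding fences indexed through `num_fences_mask`. A minimal user-space sketch of that masking scheme (standalone C, not driver code; the slot count of 8 is an arbitrary stand-in for the real hardware submission count):

#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 8u                    /* must be a power of two */
#define SLOT_MASK (NUM_SLOTS - 1u)      /* plays the role of num_fences_mask */

int main(void)
{
	uint32_t seq;

	/* consecutive sequence numbers reuse slots cyclically, even
	 * across the 32-bit wraparound */
	for (seq = 0xfffffffcu; seq != 4; seq++)
		printf("seq 0x%08x -> slot %u\n", seq, seq & SLOT_MASK);
	return 0;
}

Because the mask indexing keeps working across the unsigned wraparound, the driver no longer needs 64-bit sequence extension logic.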
@@ -364,16 +365,6 @@ struct amdgpu_fence_driver {
 #define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 
-struct amdgpu_fence {
-	struct fence base;
-
-	/* RB, DMA, etc. */
-	struct amdgpu_ring		*ring;
-	uint64_t			seq;
-
-	wait_queue_t			fence_wake;
-};
-
 struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo	*bo;
@@ -385,7 +376,8 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
 
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+				  unsigned num_hw_submission);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
@@ -393,7 +385,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
@@ -539,11 +530,14 @@ int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
  * Assumption is that there won't be hole (all object on same
  * alignment).
  */
+
+#define AMDGPU_SA_NUM_FENCE_LISTS	32
+
 struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
-	struct list_head	flist[AMDGPU_MAX_RINGS];
+	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
@@ -727,7 +721,6 @@ struct amdgpu_ib {
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
-	struct fence			*fence;
	struct amdgpu_user_fence	*user;
	struct amdgpu_vm		*vm;
	unsigned			vm_id;
@@ -1143,7 +1136,7 @@ struct amdgpu_gfx {
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ib, struct fence *last_vm_update,
		       struct fence **f);
@@ -1164,7 +1157,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
-struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
 
 /*
  * CS.
@@ -1206,6 +1198,7 @@ struct amdgpu_job {
	struct amdgpu_ring	*ring;
	struct amdgpu_sync	sync;
	struct amdgpu_ib	*ibs;
+	struct fence		*fence; /* the hw fence */
	uint32_t		num_ibs;
	void			*owner;
	struct amdgpu_user_fence uf;
@@ -2066,20 +2059,6 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 
-/*
- * Cast helper
- */
-extern const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
-{
-	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
-
-	if (__f->base.ops == &amdgpu_fence_ops)
-		return __f;
-
-	return NULL;
-}
-
 /*
  * Registers read & write functions.
  */
@@ -47,9 +47,30 @@
  * that the the relevant GPU caches have been flushed.
  */
 
+struct amdgpu_fence {
+	struct fence base;
+
+	/* RB, DMA, etc. */
+	struct amdgpu_ring		*ring;
+};
+
 static struct kmem_cache *amdgpu_fence_slab;
 static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
 
+/*
+ * Cast helper
+ */
+static const struct fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+{
+	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
+
+	if (__f->base.ops == &amdgpu_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
 /**
  * amdgpu_fence_write - write a fence value
  *
@@ -82,7 +103,7 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
-		seq = lower_32_bits(atomic64_read(&drv->last_seq));
+		seq = atomic_read(&drv->last_seq);
 
	return seq;
 }
@@ -100,20 +121,32 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 {
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
+	struct fence **ptr;
+	uint32_t seq;
 
	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;
 
-	fence->seq = ++ring->fence_drv.sync_seq;
+	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
-		   &ring->fence_drv.fence_queue.lock,
+		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
-		   fence->seq);
+		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
-			       fence->seq, AMDGPU_FENCE_FLAG_INT);
+			       seq, AMDGPU_FENCE_FLAG_INT);
 
+	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+	/* This function can't be called concurrently anyway, otherwise
+	 * emitting the fence would mess up the hardware ring buffer.
+	 */
+	BUG_ON(rcu_dereference_protected(*ptr, 1));
+
+	rcu_assign_pointer(*ptr, fence_get(&fence->base));
+
	*f = &fence->base;
 
	return 0;
 }
 
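The BUG_ON above encodes an invariant rather than a runtime check: the scheduler never allows more than `num_hw_submission` fences to be outstanding, so the slot a new sequence number maps to has always been signaled and cleared already. A small simulation of that guarantee (standalone C sketch, not driver code; NUM_SLOTS and the signaling model are illustrative stand-ins):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 4u                    /* stand-in for num_hw_submission */

static int slots[NUM_SLOTS];            /* 1 = fence still outstanding */

int main(void)
{
	uint32_t seq, signaled = 0;

	for (seq = 1; seq <= 32; seq++) {
		/* the scheduler never lets more than NUM_SLOTS jobs be
		 * outstanding, so the oldest is signaled before reuse */
		if (seq - signaled > NUM_SLOTS) {
			signaled++;
			slots[signaled & (NUM_SLOTS - 1)] = 0;
		}
		/* the slot for the new fence is therefore always free,
		 * which is what the BUG_ON in the hunk above asserts */
		assert(!slots[seq & (NUM_SLOTS - 1)]);
		slots[seq & (NUM_SLOTS - 1)] = 1;
	}
	printf("no slot collision across %u emissions\n", 32u);
	return 0;
}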
@@ -131,89 +164,48 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
 }
 
 /**
- * amdgpu_fence_activity - check for fence activity
+ * amdgpu_fence_process - check for fence activity
  *
  * @ring: pointer to struct amdgpu_ring
  *
  * Checks the current fence value and calculates the last
- * signalled fence value. Returns true if activity occured
- * on the ring, and the fence_queue should be waken up.
- */
-static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
-{
-	uint64_t seq, last_seq, last_emitted;
-	unsigned count_loop = 0;
-	bool wake = false;
-
-	/* Note there is a scenario here for an infinite loop but it's
-	 * very unlikely to happen. For it to happen, the current polling
-	 * process need to be interrupted by another process and another
-	 * process needs to update the last_seq btw the atomic read and
-	 * xchg of the current process.
-	 *
-	 * More over for this to go in infinite loop there need to be
-	 * continuously new fence signaled ie amdgpu_fence_read needs
-	 * to return a different value each time for both the currently
-	 * polling process and the other process that xchg the last_seq
-	 * btw atomic read and xchg of the current process. And the
-	 * value the other process set as last seq must be higher than
-	 * the seq value we just read. Which means that current process
-	 * need to be interrupted after amdgpu_fence_read and before
-	 * atomic xchg.
-	 *
-	 * To be even more safe we count the number of time we loop and
-	 * we bail after 10 loop just accepting the fact that we might
-	 * have temporarly set the last_seq not to the true real last
-	 * seq but to an older one.
-	 */
-	last_seq = atomic64_read(&ring->fence_drv.last_seq);
-	do {
-		last_emitted = ring->fence_drv.sync_seq;
-		seq = amdgpu_fence_read(ring);
-		seq |= last_seq & 0xffffffff00000000LL;
-		if (seq < last_seq) {
-			seq &= 0xffffffff;
-			seq |= last_emitted & 0xffffffff00000000LL;
-		}
-
-		if (seq <= last_seq || seq > last_emitted) {
-			break;
-		}
-		/* If we loop over we don't want to return without
-		 * checking if a fence is signaled as it means that the
-		 * seq we just read is different from the previous on.
-		 */
-		wake = true;
-		last_seq = seq;
-		if ((count_loop++) > 10) {
-			/* We looped over too many time leave with the
-			 * fact that we might have set an older fence
-			 * seq then the current real last seq as signaled
-			 * by the hw.
-			 */
-			break;
-		}
-	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
-
-	if (seq < last_emitted)
-		amdgpu_fence_schedule_fallback(ring);
-
-	return wake;
-}
-
-/**
- * amdgpu_fence_process - process a fence
- *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
- *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * signalled fence value. Wakes the fence queue if the
+ * sequence number has increased.
  */
 void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->fence_drv.fence_queue);
+	struct amdgpu_fence_driver *drv = &ring->fence_drv;
+	uint32_t seq, last_seq;
+	int r;
+
+	do {
+		last_seq = atomic_read(&ring->fence_drv.last_seq);
+		seq = amdgpu_fence_read(ring);
+
+	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
+
+	if (seq != ring->fence_drv.sync_seq)
+		amdgpu_fence_schedule_fallback(ring);
+
+	while (last_seq != seq) {
+		struct fence *fence, **ptr;
+
+		ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+
+		/* There is always exactly one thread signaling this fence slot */
+		fence = rcu_dereference_protected(*ptr, 1);
+		rcu_assign_pointer(*ptr, NULL);
+
+		BUG_ON(!fence);
+
+		r = fence_signal(fence);
+		if (!r)
+			FENCE_TRACE(fence, "signaled from irq context\n");
+		else
+			BUG();
+
+		fence_put(fence);
+	}
 }
 
 /**
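The rewritten amdgpu_fence_process advances `last_seq` with an atomic compare-and-exchange loop, so racing callers agree on exactly one winner per sequence range and each fence slot is signaled once. A minimal user-space sketch of the same claim-by-cmpxchg pattern using C11 atomics (standalone C, not driver code; hw_seq stands in for amdgpu_fence_read()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint last_seq;            /* stands in for fence_drv.last_seq */
static unsigned int hw_seq = 5;         /* stands in for amdgpu_fence_read() */

/* Advance last_seq to the current hardware value; returns the value it
 * started from, mirroring the do/while cmpxchg loop in the hunk above. */
static unsigned int advance_last_seq(void)
{
	unsigned int last, seq;

	do {
		last = atomic_load(&last_seq);
		seq = hw_seq;
	} while (!atomic_compare_exchange_weak(&last_seq, &last, seq));
	return last;
}

int main(void)
{
	printf("advanced from %u\n", advance_last_seq()); /* from 0 */
	printf("advanced from %u\n", advance_last_seq()); /* from 5 */
	return 0;
}

The caller that successfully moves `last_seq` from its old value owns the range (old, new] and signals those slots; a concurrent caller simply retries and finds nothing left to claim.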
@@ -230,77 +222,6 @@ static void amdgpu_fence_fallback(unsigned long arg)
	amdgpu_fence_process(ring);
 }
 
-/**
- * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
- *
- * @ring: ring the fence is associated with
- * @seq: sequence number
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if the fence has signaled (current fence value
- * is >= requested value) or false if it has not (current fence
- * value is < the requested value. Helper function for
- * amdgpu_fence_signaled().
- */
-static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
-{
-	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-		return true;
-
-	/* poll new last sequence at least once */
-	amdgpu_fence_process(ring);
-	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-		return true;
-
-	return false;
-}
-
-/*
- * amdgpu_ring_wait_seq - wait for seq of the specific ring to signal
- * @ring: ring to wait on for the seq number
- * @seq: seq number wait for
- *
- * return value:
- * 0: seq signaled, and gpu not hang
- * -EINVAL: some paramter is not valid
- */
-static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
-{
-	BUG_ON(!ring);
-	if (seq > ring->fence_drv.sync_seq)
-		return -EINVAL;
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-		return 0;
-
-	amdgpu_fence_schedule_fallback(ring);
-	wait_event(ring->fence_drv.fence_queue,
-		   amdgpu_fence_seq_signaled(ring, seq));
-
-	return 0;
-}
-
-/**
- * amdgpu_fence_wait_next - wait for the next fence to signal
- *
- * @adev: amdgpu device pointer
- * @ring: ring index the fence is associated with
- *
- * Wait for the next fence on the requested ring to signal (all asics).
- * Returns 0 if the next fence has passed, error for all other cases.
- * Caller must hold ring lock.
- */
-int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
-{
-	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-
-	if (seq >= ring->fence_drv.sync_seq)
-		return -ENOENT;
-
-	return amdgpu_fence_ring_wait_seq(ring, seq);
-}
-
 /**
  * amdgpu_fence_wait_empty - wait for all fences to signal
  *
@@ -309,16 +230,28 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
  *
  * Wait for all fences on the requested ring to signal (all asics).
  * Returns 0 if the fences have passed, error for all other cases.
- * Caller must hold ring lock.
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	uint64_t seq = ring->fence_drv.sync_seq;
+	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+	struct fence *fence, **ptr;
+	int r;
 
	if (!seq)
		return 0;
 
-	return amdgpu_fence_ring_wait_seq(ring, seq);
+	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+	rcu_read_lock();
+	fence = rcu_dereference(*ptr);
+	if (!fence || !fence_get_rcu(fence)) {
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+
+	r = fence_wait(fence, false);
+	fence_put(fence);
+	return r;
 }
 
 /**
@@ -338,13 +271,10 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
-	emitted = ring->fence_drv.sync_seq
-		- atomic64_read(&ring->fence_drv.last_seq);
-	/* to avoid 32bits warp around */
-	if (emitted > 0x10000000)
-		emitted = 0x10000000;
-
-	return (unsigned)emitted;
+	emitted = 0x100000000ull;
+	emitted -= atomic_read(&ring->fence_drv.last_seq);
+	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+	return lower_32_bits(emitted);
 }
 
 /**
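The new count starts from 2^32 so the subtraction cannot underflow even when `sync_seq` has wrapped past `last_seq` in 32 bits; truncating back to 32 bits yields the outstanding count. A small standalone C demo of the same arithmetic (sketch only, example values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the hunk above: correct even when sync_seq has
 * wrapped past last_seq in 32 bits. */
static uint32_t count_emitted(uint32_t last_seq, uint32_t sync_seq)
{
	uint64_t emitted = 0x100000000ull;

	emitted -= last_seq;
	emitted += sync_seq;
	return (uint32_t)emitted;       /* lower_32_bits() */
}

int main(void)
{
	/* plain case: 10 emitted, 4 signaled -> 6 outstanding */
	printf("%u\n", count_emitted(4, 10));
	/* wrapped case: sync_seq wrapped to 5, last_seq near the top -> 7 */
	printf("%u\n", count_emitted(0xfffffffeu, 5));
	return 0;
}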
@@ -376,7 +306,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
-	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
+	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);
 
	ring->fence_drv.irq_src = irq_src;
@@ -394,25 +324,36 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
  * for the requested ring.
  *
  * @ring: ring to init the fence driver on
+ * @num_hw_submission: number of entries on the hardware queue
  *
  * Init the fence driver for the requested ring (all asics).
  * Helper function for amdgpu_fence_driver_init().
  */
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+				  unsigned num_hw_submission)
 {
	long timeout;
	int r;
 
+	/* Check that num_hw_submission is a power of two */
+	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
+		return -EINVAL;
+
	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
-	atomic64_set(&ring->fence_drv.last_seq, 0);
+	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;
 
	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);
 
-	init_waitqueue_head(&ring->fence_drv.fence_queue);
+	ring->fence_drv.num_fences_mask = num_hw_submission - 1;
+	spin_lock_init(&ring->fence_drv.lock);
+	ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
+					 GFP_KERNEL);
+	if (!ring->fence_drv.fences)
+		return -ENOMEM;
+
	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
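The new -EINVAL path rejects any submission count that is not a power of two, because slot indexing relies on `seq & (n - 1)` being a valid modulo. The check is the classic single-bit trick; a standalone C sketch:

#include <stdio.h>

/* n is a power of two iff it has exactly one bit set, in which case
 * n & (n - 1) clears that bit and yields zero (n == 0 is excluded). */
static int is_power_of_two(unsigned n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned n;

	for (n = 1; n <= 9; n++)
		printf("%u -> %s\n", n, is_power_of_two(n) ? "ok" : "-EINVAL");
	return 0;
}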
@@ -426,7 +367,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-			   amdgpu_sched_hw_submission,
+			   num_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
@@ -474,10 +415,9 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev)
  */
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 {
-	int i, r;
+	unsigned i, j;
+	int r;
 
-	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
-		kmem_cache_destroy(amdgpu_fence_slab);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
 
@@ -488,13 +428,18 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
-		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
+		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+			fence_put(ring->fence_drv.fences[i]);
+		kfree(ring->fence_drv.fences);
		ring->fence_drv.initialized = false;
	}
+
+	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+		kmem_cache_destroy(amdgpu_fence_slab);
 }
 
 /**
@@ -590,66 +535,6 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
	return (const char *)fence->ring->name;
 }
 
-/**
- * amdgpu_fence_is_signaled - test if fence is signaled
- *
- * @f: fence to test
- *
- * Test the fence sequence number if it is already signaled. If it isn't
- * signaled start fence processing. Returns True if the fence is signaled.
- */
-static bool amdgpu_fence_is_signaled(struct fence *f)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_ring *ring = fence->ring;
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return true;
-
-	amdgpu_fence_process(ring);
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return true;
-
-	return false;
-}
-
-/**
- * amdgpu_fence_check_signaled - callback from fence_queue
- *
- * this function is called with fence_queue lock held, which is also used
- * for the fence locking itself, so unlocked variants are used for
- * fence_signal, and remove_wait_queue.
- */
-static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
-{
-	struct amdgpu_fence *fence;
-	struct amdgpu_device *adev;
-	u64 seq;
-	int ret;
-
-	fence = container_of(wait, struct amdgpu_fence, fence_wake);
-	adev = fence->ring->adev;
-
-	/*
-	 * We cannot use amdgpu_fence_process here because we're already
-	 * in the waitqueue, in a call from wake_up_all.
-	 */
-	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
-	if (seq >= fence->seq) {
-		ret = fence_signal_locked(&fence->base);
-		if (!ret)
-			FENCE_TRACE(&fence->base, "signaled from irq context\n");
-		else
-			FENCE_TRACE(&fence->base, "was already signaled\n");
-
-		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
-		fence_put(&fence->base);
-	} else
-		FENCE_TRACE(&fence->base, "pending\n");
-	return 0;
-}
-
 /**
  * amdgpu_fence_enable_signaling - enable signalling on fence
  * @fence: fence
@@ -663,31 +548,45 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
 
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return false;
-
-	fence->fence_wake.flags = 0;
-	fence->fence_wake.private = NULL;
-	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
-	fence_get(f);
	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);
 
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 
	return true;
 }
 
-static void amdgpu_fence_release(struct fence *f)
+/**
+ * amdgpu_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the fence memory after the RCU grace period.
+ */
+static void amdgpu_fence_free(struct rcu_head *rcu)
 {
+	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
 }
 
-const struct fence_ops amdgpu_fence_ops = {
+/**
+ * amdgpu_fence_release - callback that fence can be freed
+ *
+ * @fence: fence
+ *
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
+ */
+static void amdgpu_fence_release(struct fence *f)
+{
+	call_rcu(&f->rcu, amdgpu_fence_free);
+}
+
+static const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
-	.signaled = amdgpu_fence_is_signaled,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
 };
@@ -711,9 +610,9 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
		amdgpu_fence_process(ring);
 
		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
-		seq_printf(m, "Last signaled fence 0x%016llx\n",
-			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
-		seq_printf(m, "Last emitted        0x%016llx\n",
+		seq_printf(m, "Last signaled fence 0x%08x\n",
+			   atomic_read(&ring->fence_drv.last_seq));
+		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
@@ -161,7 +161,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
@@ -258,12 +258,10 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;
 
-	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
-	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
+	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
+	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
 
-		/* if we want to write to it we must require anonymous
-		   memory and install a MMU notifier */
+		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}
 
@@ -85,13 +85,13 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  *
  * @adev: amdgpu_device pointer
  * @ib: IB object to free
+ * @f: the fence SA bo need wait on for the ib alloation
  *
  * Free an IB (all asics).
  */
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f)
 {
-	amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
-	fence_put(ib->fence);
+	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 }
 
 /**
@@ -123,6 +123,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	struct amdgpu_ib *ib = &ibs[0];
	struct amdgpu_ctx *ctx, *old_ctx;
	struct amdgpu_vm *vm;
+	struct fence *hwf;
	unsigned i;
	int r = 0;
 
@@ -179,7 +180,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		amdgpu_ring_emit_hdp_invalidate(ring);
	}
 
-	r = amdgpu_fence_emit(ring, &ib->fence);
+	r = amdgpu_fence_emit(ring, &hwf);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		ring->current_ctx = old_ctx;
@@ -198,7 +199,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	}
 
	if (f)
-		*f = fence_get(ib->fence);
+		*f = fence_get(hwf);
 
	amdgpu_ring_commit(ring);
	return 0;
@@ -70,9 +70,13 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 void amdgpu_job_free(struct amdgpu_job *job)
 {
	unsigned i;
+	struct fence *f;
+	/* use sched fence if available */
+	f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
 
	for (i = 0; i < job->num_ibs; ++i)
-		amdgpu_ib_free(job->adev, &job->ibs[i]);
+		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
+	fence_put(job->fence);
 
	amdgpu_bo_unref(&job->uf.bo);
	amdgpu_sync_free(&job->sync);
@@ -156,6 +160,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
	}
 
 err:
+	job->fence = fence;
	amdgpu_job_free(job);
	return fence;
 }
@@ -308,7 +308,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
	bool is_iomem;
-	int r;
+	long r;
 
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;
@@ -319,14 +319,20 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
		}
		return 0;
	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
-	if (r) {
+
+	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+						MAX_SCHEDULE_TIMEOUT);
+	if (r < 0)
+		return r;
+
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r)
		return r;
-	}
+
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
-	if (ptr) {
+	if (ptr)
		*ptr = bo->kptr;
-	}
+
	return 0;
 }
 
@@ -236,7 +236,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
-		r = amdgpu_fence_driver_init_ring(ring);
+		r = amdgpu_fence_driver_init_ring(ring,
+			amdgpu_sched_hw_submission);
		if (r)
			return r;
	}
@@ -352,30 +353,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
	}
 }
 
-/**
- * amdgpu_ring_from_fence - get ring from fence
- *
- * @f: fence structure
- *
- * Extract the ring a fence belongs to. Handles both scheduler as
- * well as hardware fences.
- */
-struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f)
-{
-	struct amdgpu_fence *a_fence;
-	struct amd_sched_fence *s_fence;
-
-	s_fence = to_amd_sched_fence(f);
-	if (s_fence)
-		return container_of(s_fence->sched, struct amdgpu_ring, sched);
-
-	a_fence = to_amdgpu_fence(f);
-	if (a_fence)
-		return a_fence->ring;
-
-	return NULL;
-}
-
 /*
  * Debugfs info
  */
@@ -60,9 +60,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);
-	}
 
	r = amdgpu_bo_create(adev, size, align, true, domain,
			     0, NULL, NULL, &sa_manager->bo);
@@ -228,11 +227,9 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
	unsigned soffset, eoffset, wasted;
	int i;
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (!list_empty(&sa_manager->flist[i])) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
+		if (!list_empty(&sa_manager->flist[i]))
			return true;
-		}
-	}
 
	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
@@ -265,12 +262,11 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
	/* go over all fence list and try to find the closest sa_bo
	 * of the current last
	 */
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
		struct amdgpu_sa_bo *sa_bo;
 
-		if (list_empty(&sa_manager->flist[i])) {
+		if (list_empty(&sa_manager->flist[i]))
			continue;
-		}
 
		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);
@@ -299,7 +295,9 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
	}
 
	if (best_bo) {
-		uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
+		uint32_t idx = best_bo->fence->context;
+
+		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;
 
@@ -315,8 +313,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
 {
-	struct fence *fences[AMDGPU_MAX_RINGS];
-	unsigned tries[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned count;
	int i, r;
	signed long t;
@@ -338,7 +336,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 
	spin_lock(&sa_manager->wq.lock);
	do {
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}
@@ -355,7 +353,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		/* see if we can skip over some allocations */
	} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
-	for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+	for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		if (fences[i])
			fences[count++] = fence_get(fences[i]);
 
@@ -397,8 +395,9 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
	spin_lock(&sa_manager->wq.lock);
	if (fence && !fence_is_signaled(fence)) {
		uint32_t idx;
+
		(*sa_bo)->fence = fence_get(fence);
-		idx = amdgpu_ring_from_fence(fence)->idx;
+		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
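With amdgpu_ring_from_fence() gone, the SA manager can no longer bucket pending frees by ring index; instead it hashes the fence's context number into one of the AMDGPU_SA_NUM_FENCE_LISTS lists by simple modulo. A standalone C sketch of that bucketing (example context values are arbitrary; real ones come from the fence context allocator):

#include <stdint.h>
#include <stdio.h>

#define NUM_FENCE_LISTS 32u     /* mirrors AMDGPU_SA_NUM_FENCE_LISTS */

int main(void)
{
	/* arbitrary example contexts standing in for fence->context */
	uint64_t contexts[] = { 3, 35, 40, 1027 };
	unsigned i;

	for (i = 0; i < sizeof(contexts) / sizeof(contexts[0]); i++)
		printf("context %llu -> flist[%llu]\n",
		       (unsigned long long)contexts[i],
		       (unsigned long long)(contexts[i] % NUM_FENCE_LISTS));
	return 0;
}

Two contexts may share a bucket (3 and 35 above), which is harmless here: the lists only group fences for retry bookkeeping, not for correctness.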
@@ -410,25 +409,6 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 #if defined(CONFIG_DEBUG_FS)
 
-static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
-{
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
-	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
-
-	if (a_fence)
-		seq_printf(m, " protected by 0x%016llx on ring %d",
-			   a_fence->seq, a_fence->ring->idx);
-
-	if (s_fence) {
-		struct amdgpu_ring *ring;
-
-
-		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
-		seq_printf(m, " protected by 0x%016x on ring %d",
-			   s_fence->base.seqno, ring->idx);
-	}
-}
-
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
 {
@@ -445,8 +425,11 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
+
		if (i->fence)
-			amdgpu_sa_bo_dump_fence(i->fence, m);
+			seq_printf(m, " protected by 0x%08x on context %d",
+				   i->fence->seqno, i->fence->context);
+
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
@@ -539,13 +539,6 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
		return -EINVAL;
	}
 
-	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
-						MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
-		return r;
-	}
-
	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
@@ -887,6 +880,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+		job->fence = f;
		if (r)
			goto err_free;
 
@@ -426,6 +426,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
		ib->ptr[i] = 0x0;
 
	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+	job->fence = f;
	if (r)
		goto err;
 
@@ -487,6 +488,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+		job->fence = f;
		if (r)
			goto err;
 
@@ -669,7 +669,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 
 err1:
-	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err0:
	amdgpu_wb_free(adev, index);
	return r;
@@ -2163,7 +2163,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 
 err2:
-	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
@@ -732,7 +732,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
	}
 err2:
-	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
@@ -1289,7 +1290,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
 fail:
-	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 
	return r;
 }
|
|||
#include "oss/oss_2_4_d.h"
|
||||
#include "oss/oss_2_4_sh_mask.h"
|
||||
|
||||
#include "gmc/gmc_8_1_d.h"
|
||||
#include "gmc/gmc_8_1_sh_mask.h"
|
||||
#include "gmc/gmc_7_1_d.h"
|
||||
#include "gmc/gmc_7_1_sh_mask.h"
|
||||
|
||||
#include "gca/gfx_8_0_d.h"
|
||||
#include "gca/gfx_8_0_enum.h"
|
||||
|
@@ -727,7 +727,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 
 err1:
-	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err0:
	amdgpu_wb_free(adev, index);
	return r;
@@ -878,7 +878,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
	}
 err1:
-	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err0:
	amdgpu_wb_free(adev, index);
	return r;
@@ -6225,6 +6225,12 @@ typedef enum TCC_CACHE_POLICIES {
	TCC_CACHE_POLICY_STREAM = 0x1,
	TCC_CACHE_POLICY_BYPASS = 0x2,
 } TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+	MTYPE_NC_NV = 0x0,
+	MTYPE_NC = 0x1,
+	MTYPE_CC = 0x2,
+	MTYPE_UC = 0x3,
+} MTYPE;
 typedef enum PERFMON_COUNTER_MODE {
	PERFMON_COUNTER_MODE_ACCUM = 0x0,
	PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
@@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DynamicUVDState);
 
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UVDDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_VCEDPM);
+
	cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
	cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
	cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
@@ -4275,7 +4275,6 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		dpm_table->mclk_table.dpm_levels
			[dpm_table->mclk_table.count - 1].value = mclk;
-
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -4886,6 +4885,10 @@ static void fiji_print_current_perforce_level(
	activity_percent >>= 8;
 
	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+
+	seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+	seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
 }
 
 static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
@@ -29,6 +29,7 @@
 #include "smu73_discrete.h"
 #include "ppatomctrl.h"
 #include "fiji_ppsmc.h"
+#include "pp_endian.h"
 
 #define FIJI_MAX_HARDWARE_POWERLEVELS	2
 #define FIJI_AT_DFLT	30
@@ -347,15 +348,4 @@ int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
 
-#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
-#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
-
-#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
-#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
-
-#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
-#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
-
-#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
-
 #endif /* _FIJI_HWMGR_H_ */
@@ -5185,7 +5185,6 @@ tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n", mclk/100, sclk/100);
 
-
	offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	activity_percent += 0x80;
@@ -5193,6 +5192,9 @@ tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
 
	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
 
+	seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+	seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
 }
 
 static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
@@ -28,6 +28,7 @@
 #include "ppatomctrl.h"
 #include "ppinterrupt.h"
 #include "tonga_powertune.h"
+#include "pp_endian.h"
 
 #define TONGA_MAX_HARDWARE_POWERLEVELS	2
 #define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
@@ -386,17 +387,6 @@ typedef struct tonga_hwmgr tonga_hwmgr;
 
 #define TONGA_UNUSED_GPIO_PIN       0x7F
 
-#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
-#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
-
-#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
-#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
-
-#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
-#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
-
-#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
-
 int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
 int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
 int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _PP_ENDIAN_H_
+#define _PP_ENDIAN_H_
+
+#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
+#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
+
+#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
+#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
+
+#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
+#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
+
+#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
+
+#endif /* _PP_ENDIAN_H_ */
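The new pp_endian.h centralizes the host-to-SMC byte-order conversion that was previously duplicated in fiji_hwmgr.h and tonga_hwmgr.h; the SMC firmware expects big-endian values. A user-space analogue of the same macros (standalone C sketch; on a little-endian host __builtin_bswap32 stands in for the kernel's cpu_to_be32(), and on a big-endian host the conversion would be a no-op):

#include <stdint.h>
#include <stdio.h>

/* user-space stand-ins for cpu_to_be32()/be32_to_cpu() on little-endian */
#define PP_HOST_TO_SMC_UL(X) __builtin_bswap32(X)
#define PP_SMC_TO_HOST_UL(X) __builtin_bswap32(X)
#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))

int main(void)
{
	uint32_t v = 0x12345678u;

	CONVERT_FROM_HOST_TO_SMC_UL(v);         /* in-place, as used above */
	printf("0x%08x\n", v);                  /* 0x78563412 on LE hosts */
	printf("0x%08x\n", PP_SMC_TO_HOST_UL(v)); /* round-trips to 0x12345678 */
	return 0;
}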
@@ -32,6 +32,27 @@ struct pp_instance;
 #define smu_lower_32_bits(n) ((uint32_t)(n))
 #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
 
+enum AVFS_BTC_STATUS {
+	AVFS_BTC_BOOT = 0,
+	AVFS_BTC_BOOT_STARTEDSMU,
+	AVFS_LOAD_VIRUS,
+	AVFS_BTC_VIRUS_LOADED,
+	AVFS_BTC_VIRUS_FAIL,
+	AVFS_BTC_COMPLETED_PREVIOUSLY,
+	AVFS_BTC_ENABLEAVFS,
+	AVFS_BTC_STARTED,
+	AVFS_BTC_FAILED,
+	AVFS_BTC_RESTOREVFT_FAILED,
+	AVFS_BTC_SAVEVFT_FAILED,
+	AVFS_BTC_DPMTABLESETUP_FAILED,
+	AVFS_BTC_COMPLETED_UNSAVED,
+	AVFS_BTC_COMPLETED_SAVED,
+	AVFS_BTC_COMPLETED_RESTORED,
+	AVFS_BTC_DISABLED,
+	AVFS_BTC_NOTSUPPORTED,
+	AVFS_BTC_SMUMSG_ERROR
+};
+
 struct pp_smumgr_func {
	int (*smu_init)(struct pp_smumgr *smumgr);
	int (*smu_fini)(struct pp_smumgr *smumgr);
@@ -23,24 +23,6 @@
 #ifndef _FIJI_SMUMANAGER_H_
 #define _FIJI_SMUMANAGER_H_
 
-enum AVFS_BTC_STATUS {
-	AVFS_BTC_BOOT = 0,
-	AVFS_BTC_BOOT_STARTEDSMU,
-	AVFS_LOAD_VIRUS,
-	AVFS_BTC_VIRUS_LOADED,
-	AVFS_BTC_VIRUS_FAIL,
-	AVFS_BTC_STARTED,
-	AVFS_BTC_FAILED,
-	AVFS_BTC_RESTOREVFT_FAILED,
-	AVFS_BTC_SAVEVFT_FAILED,
-	AVFS_BTC_DPMTABLESETUP_FAILED,
-	AVFS_BTC_COMPLETED_UNSAVED,
-	AVFS_BTC_COMPLETED_SAVED,
-	AVFS_BTC_COMPLETED_RESTORED,
-	AVFS_BTC_DISABLED,
-	AVFS_BTC_NOTSUPPORTED,
-	AVFS_BTC_SMUMSG_ERROR
-};
-
 struct fiji_smu_avfs {
	enum AVFS_BTC_STATUS AvfsBtcStatus;
@@ -84,12 +84,33 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
	return true;
 }
 
-static void amd_sched_fence_release(struct fence *f)
+/**
+ * amd_sched_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the fence memory after the RCU grace period.
+ */
+static void amd_sched_fence_free(struct rcu_head *rcu)
 {
+	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amd_sched_fence *fence = to_amd_sched_fence(f);
	kmem_cache_free(sched_fence_slab, fence);
 }
 
+/**
+ * amd_sched_fence_release - callback that fence can be freed
+ *
+ * @fence: fence
+ *
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
+ */
+static void amd_sched_fence_release(struct fence *f)
+{
+	call_rcu(&f->rcu, amd_sched_fence_free);
+}
+
 const struct fence_ops amd_sched_fence_ops = {
	.get_driver_name = amd_sched_fence_get_driver_name,
	.get_timeline_name = amd_sched_fence_get_timeline_name,
@@ -66,9 +66,10 @@ int atom_debug = 0;
 static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
 
-static uint32_t atom_arg_mask[8] =
-{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
-  0xFF000000 };
+static uint32_t atom_arg_mask[8] = {
+	0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
+	0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+};
 static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
 
 static int atom_dst_to_src[8][4] = {
@@ -1665,11 +1665,11 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 }
 
 int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
-                                  struct drm_framebuffer *fb,
+				  struct drm_framebuffer *fb,
				  int x, int y, enum mode_set_atomic state)
 {
-       struct drm_device *dev = crtc->dev;
-       struct radeon_device *rdev = dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
 
	if (ASIC_IS_DCE4(rdev))
		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
@@ -37,10 +37,10 @@
 #define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
 
 static char *voltage_names[] = {
-        "0.4V", "0.6V", "0.8V", "1.2V"
+	"0.4V", "0.6V", "0.8V", "1.2V"
 };
 static char *pre_emph_names[] = {
-        "0dB", "3.5dB", "6dB", "9.5dB"
+	"0dB", "3.5dB", "6dB", "9.5dB"
 };
 
 /***** radeon AUX functions *****/
@@ -1163,12 +1163,11 @@ u32 btc_valid_sclk[40] =
	155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000
 };
 
-static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
-{
-        { 10000, 30000, RADEON_SCLK_UP },
-        { 15000, 30000, RADEON_SCLK_UP },
-        { 20000, 30000, RADEON_SCLK_UP },
-        { 25000, 30000, RADEON_SCLK_UP }
+static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = {
+	{ 10000, 30000, RADEON_SCLK_UP },
+	{ 15000, 30000, RADEON_SCLK_UP },
+	{ 20000, 30000, RADEON_SCLK_UP },
+	{ 25000, 30000, RADEON_SCLK_UP }
 };
 
 void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
@@ -1637,14 +1636,14 @@ static int btc_init_smc_table(struct radeon_device *rdev,
	cypress_populate_smc_voltage_tables(rdev, table);
 
	switch (rdev->pm.int_thermal_type) {
-        case THERMAL_TYPE_EVERGREEN:
-        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
-        case THERMAL_TYPE_NONE:
+	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
-        default:
+	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}
@@ -1860,37 +1859,37 @@ static bool btc_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
	case MC_SEQ_RAS_TIMING >> 2:
		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
		break;
-        case MC_SEQ_CAS_TIMING >> 2:
+	case MC_SEQ_CAS_TIMING >> 2:
		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
		break;
-        case MC_SEQ_MISC_TIMING >> 2:
+	case MC_SEQ_MISC_TIMING >> 2:
		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
		break;
-        case MC_SEQ_MISC_TIMING2 >> 2:
+	case MC_SEQ_MISC_TIMING2 >> 2:
		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
		break;
-        case MC_SEQ_RD_CTL_D0 >> 2:
+	case MC_SEQ_RD_CTL_D0 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
		break;
-        case MC_SEQ_RD_CTL_D1 >> 2:
+	case MC_SEQ_RD_CTL_D1 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
		break;
-        case MC_SEQ_WR_CTL_D0 >> 2:
+	case MC_SEQ_WR_CTL_D0 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
		break;
-        case MC_SEQ_WR_CTL_D1 >> 2:
+	case MC_SEQ_WR_CTL_D1 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
		break;
-        case MC_PMG_CMD_EMRS >> 2:
+	case MC_PMG_CMD_EMRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
		break;
-        case MC_PMG_CMD_MRS >> 2:
+	case MC_PMG_CMD_MRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
		break;
-        case MC_PMG_CMD_MRS1 >> 2:
+	case MC_PMG_CMD_MRS1 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
		break;
-        default:
+	default:
		result = false;
		break;
	}
@@ -192,9 +192,9 @@ static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
 
 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
 {
-        struct ci_power_info *pi = rdev->pm.dpm.priv;
+	struct ci_power_info *pi = rdev->pm.dpm.priv;
 
-        return pi;
+	return pi;
 }
 
 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
@@ -1632,7 +1632,7 @@ static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
 
-        ci_set_power_limit(rdev, power_limit);
+	ci_set_power_limit(rdev, power_limit);
 
	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
@@ -2017,9 +2017,9 @@ static void ci_enable_display_gap(struct radeon_device *rdev)
 {
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
 
-        tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
-        tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
-                DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
+	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
+	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
 
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
 }
@@ -2938,8 +2938,8 @@ static int ci_populate_single_memory_level(struct radeon_device *rdev,
 
	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
-        memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
-        memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
 
	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
@@ -3152,7 +3152,7 @@ static int ci_calculate_sclk_params(struct radeon_device *rdev,
 
	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
-        spll_func_cntl_3 |= SPLL_DITHEN;
+	spll_func_cntl_3 |= SPLL_DITHEN;
 
	if (pi->caps_sclk_ss_support) {
		struct radeon_atom_ss ss;
@@ -3229,7 +3229,7 @@ static int ci_populate_single_graphic_level(struct radeon_device *rdev,
	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
-        graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
@ -4393,7 +4393,7 @@ static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
|
|||
break;
|
||||
case MC_SEQ_CAS_TIMING >> 2:
|
||||
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
|
||||
break;
|
||||
break;
|
||||
case MC_SEQ_MISC_TIMING >> 2:
|
||||
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
|
||||
break;
|
||||
|
@ -4625,7 +4625,7 @@ static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
|
|||
if (ret)
|
||||
goto init_mc_done;
|
||||
|
||||
ret = ci_copy_vbios_mc_reg_table(table, ci_table);
|
||||
ret = ci_copy_vbios_mc_reg_table(table, ci_table);
|
||||
if (ret)
|
||||
goto init_mc_done;
|
||||
|
||||
|
@ -4916,7 +4916,7 @@ static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *
|
|||
allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
|
||||
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
|
||||
allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
|
||||
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
|
||||
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
|
||||
allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
|
||||
|
||||
return 0;
|
||||
|
@ -5517,7 +5517,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
|
|||
struct _NonClockInfoArray *non_clock_info_array;
|
||||
union power_info *power_info;
|
||||
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
|
||||
u16 data_offset;
|
||||
u16 data_offset;
|
||||
u8 frev, crev;
|
||||
u8 *power_state_offset;
|
||||
struct ci_ps *ps;
|
||||
|
@ -5693,8 +5693,8 @@ int ci_dpm_init(struct radeon_device *rdev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
pi->dll_default_on = false;
|
||||
pi->sram_end = SMC_RAM_END;
|
||||
pi->dll_default_on = false;
|
||||
pi->sram_end = SMC_RAM_END;
|
||||
|
||||
pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
|
||||
pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
|
||||
|
@ -5734,9 +5734,9 @@ int ci_dpm_init(struct radeon_device *rdev)
|
|||
pi->caps_uvd_dpm = true;
|
||||
pi->caps_vce_dpm = true;
|
||||
|
||||
ci_get_leakage_voltages(rdev);
|
||||
ci_patch_dependency_tables_with_leakage(rdev);
|
||||
ci_set_private_data_variables_based_on_pptable(rdev);
|
||||
ci_get_leakage_voltages(rdev);
|
||||
ci_patch_dependency_tables_with_leakage(rdev);
|
||||
ci_set_private_data_variables_based_on_pptable(rdev);
|
||||
|
||||
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
|
||||
kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
|
||||
|
@ -5839,7 +5839,7 @@ int ci_dpm_init(struct radeon_device *rdev)
|
|||
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
|
||||
else
|
||||
rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
|
||||
}
|
||||
}
|
||||
|
||||
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
|
||||
if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
|
||||
|
@ -5860,7 +5860,7 @@ int ci_dpm_init(struct radeon_device *rdev)
|
|||
#endif
|
||||
|
||||
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
|
||||
&frev, &crev, &data_offset)) {
|
||||
&frev, &crev, &data_offset)) {
|
||||
pi->caps_sclk_ss_support = true;
|
||||
pi->caps_mclk_ss_support = true;
|
||||
pi->dynamic_ss = true;
|
||||
|
|
|
@@ -194,11 +194,11 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
		return PPSMC_Result_OK;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
		if ((tmp & CKEN) == 0)
			break;
		udelay(1);
	}

	return PPSMC_Result_OK;
}
File diff suppressed because it is too large
@@ -1620,14 +1620,14 @@ static int cypress_init_smc_table(struct radeon_device *rdev,
	cypress_populate_smc_voltage_tables(rdev, table);

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}
@@ -1140,7 +1140,7 @@ static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
	int r, i;
	struct atom_clock_dividers dividers;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   clock, false, &dividers);
	if (r)
		return r;
@@ -1816,8 +1816,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);

@@ -1862,8 +1862,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

@@ -1897,8 +1897,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

@@ -1925,8 +1925,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 radeon_get_ib_value(p, idx+1) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset;
		ib[idx+2] = upper_32_bits(offset) & 0xff;

@@ -2098,8 +2098,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
		ib[idx+2] = upper_32_bits(offset) & 0xff;

@@ -2239,8 +2239,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
			return -EINVAL;
		}
		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffff8;
		ib[idx+2] = upper_32_bits(offset) & 0xff;

@@ -2261,8 +2261,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);

@@ -2283,8 +2283,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -206,7 +206,7 @@ void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder,
 * build a AVI Info Frame
 */
void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
			      unsigned char *buffer, size_t size)
{
	uint8_t *frame = buffer + 3;
@@ -2640,7 +2640,7 @@ static int kv_parse_power_table(struct radeon_device *rdev)
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

@@ -2738,7 +2738,7 @@ int kv_dpm_init(struct radeon_device *rdev)
	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	/* Enabling nb dpm on an asrock system prevents dpm from working */
	if (rdev->pdev->subsystem_vendor == 0x1849)
@@ -1257,7 +1257,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
		tmp &= ~0x00380000;
		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
		tmp &= ~0x0e000000;
		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
	}

@@ -2634,7 +2634,7 @@ int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
	struct atom_clock_dividers dividers;
	int r, i;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   ecclk, false, &dividers);
	if (r)
		return r;
@@ -725,9 +725,9 @@ extern int ni_mc_load_microcode(struct radeon_device *rdev);

struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
	struct ni_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

struct ni_ps *ni_get_ps(struct radeon_ps *rps)

@@ -1096,9 +1096,9 @@ static void ni_stop_smc(struct radeon_device *rdev)

static int ni_process_firmware_header(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 tmp;
	int ret;

@@ -1202,14 +1202,14 @@ static int ni_enter_ulp_state(struct radeon_device *rdev)
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (pi->gfx_clock_gating) {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}

	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
		 ~HOST_SMC_MSG_MASK);

	udelay(25000);

@@ -1321,12 +1321,12 @@ static void ni_populate_mvdd_value(struct radeon_device *rdev,
				   u32 mclk,
				   NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (!pi->mvdd_control) {
		voltage->index = eg_pi->mvdd_high_index;
		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
		return;
	}

@@ -1510,47 +1510,47 @@ int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F2:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F3:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
		break;
	case MC_CG_ARB_FREQ_F2:
		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
		break;
	case MC_CG_ARB_FREQ_F3:
		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
@@ -1621,9 +1621,7 @@ static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
		(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);


-	radeon_atom_set_engine_dram_timings(rdev,
-					    pl->sclk,
-					    pl->mclk);
+	radeon_atom_set_engine_dram_timings(rdev, pl->sclk, pl->mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);

@@ -1867,9 +1865,9 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,

	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;

	if (pi->mem_gddr5)
		mpll_dq_func_cntl &= ~PDNB;
	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;


	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |

@@ -1891,15 +1889,15 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
			     MRDCKD1_PDNB);

	dll_cntl |= (MRDCKA0_BYPASS |
		     MRDCKA1_BYPASS |
		     MRDCKB0_BYPASS |
		     MRDCKB1_BYPASS |
		     MRDCKC0_BYPASS |
		     MRDCKC1_BYPASS |
		     MRDCKD0_BYPASS |
		     MRDCKD1_BYPASS);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);

@@ -2089,7 +2087,7 @@ static int ni_populate_sclk_value(struct radeon_device *rdev,

static int ni_init_smc_spll_table(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
	NISLANDS_SMC_SCLK_VALUE sclk_params;

@@ -2311,8 +2309,8 @@ static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
				  NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	int ret;
	bool dll_state_on;
	u16 std_vddc;

@@ -2391,8 +2389,8 @@ static int ni_populate_smc_t(struct radeon_device *rdev,
			     struct radeon_ps *radeon_state,
			     NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 a_t;
	u32 t_l, t_h;

@@ -2451,8 +2449,8 @@ static int ni_populate_power_containment_values(struct radeon_device *rdev,
						struct radeon_ps *radeon_state,
						NISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	u32 prev_sclk;

@@ -2595,7 +2593,7 @@ static int ni_enable_power_containment(struct radeon_device *rdev,
				       struct radeon_ps *radeon_new_state,
				       bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

@@ -2625,7 +2623,7 @@ static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
					 struct radeon_ps *radeon_state,
					 NISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	int i, ret;
@@ -2770,46 +2768,46 @@ static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
	bool result = true;

	switch (in_reg) {
	case MC_SEQ_RAS_TIMING >> 2:
		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_CAS_TIMING >> 2:
		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING >> 2:
		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING2 >> 2:
		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D0 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D1 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D0 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D1 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
		break;
	case MC_PMG_CMD_EMRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS1 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
		break;
	case MC_SEQ_PMG_TIMING >> 2:
		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
		break;
	case MC_PMG_CMD_MRS2 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
		break;
	default:
		result = false;
		break;
	}

@@ -2876,9 +2874,9 @@ static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
	struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));

@@ -2896,25 +2894,25 @@ static int ni_initialize_mc_reg_table(struct radeon_device *rdev)

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);

	if (ret)
		goto init_mc_done;

	ret = ni_copy_vbios_mc_reg_table(table, ni_table);

	if (ret)
		goto init_mc_done;

	ni_set_s0_mc_reg_index(ni_table);

	ret = ni_set_mc_special_registers(rdev, ni_table);

	if (ret)
		goto init_mc_done;

	ni_set_valid_flag(ni_table);

init_mc_done:
	kfree(table);

	return ret;
}

@@ -2994,7 +2992,7 @@ static int ni_populate_mc_reg_table(struct radeon_device *rdev,
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;

@@ -3025,7 +3023,7 @@ static int ni_upload_mc_reg_table(struct radeon_device *rdev,
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
	u16 address;

@@ -3142,7 +3140,7 @@ static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	PP_NIslands_CACTABLES *cac_tables = NULL;
	int i, ret;
	u32 reg;

	if (ni_pi->enable_cac == false)
		return 0;

@@ -3422,13 +3420,13 @@ static int ni_pcie_performance_request(struct radeon_device *rdev,
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
		if (eg_pi->pcie_performance_request_registered == false)
			radeon_acpi_pcie_notify_device_ready(rdev);
		eg_pi->pcie_performance_request_registered = true;
		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
		   eg_pi->pcie_performance_request_registered) {
		eg_pi->pcie_performance_request_registered = false;
		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
	}

@@ -3441,12 +3439,12 @@ static int ni_advertise_gen2_capability(struct radeon_device *rdev)
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);

	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
		pi->pcie_gen2 = true;
	else
		pi->pcie_gen2 = false;

	if (!pi->pcie_gen2)

@@ -3458,8 +3456,8 @@ static int ni_advertise_gen2_capability(struct radeon_device *rdev)
static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					    bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp, bif;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);

@@ -3502,7 +3500,7 @@ static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,

@@ -3563,7 +3561,7 @@ void ni_update_current_ps(struct radeon_device *rdev,
{
	struct ni_ps *new_ps = ni_get_ps(rps);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	eg_pi->current_rps = *rps;
	ni_pi->current_ps = *new_ps;

@@ -3575,7 +3573,7 @@ void ni_update_requested_ps(struct radeon_device *rdev,
{
	struct ni_ps *new_ps = ni_get_ps(rps);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	eg_pi->requested_rps = *rps;
	ni_pi->requested_ps = *new_ps;

@@ -3591,8 +3589,8 @@ int ni_dpm_enable(struct radeon_device *rdev)

	if (pi->gfx_clock_gating)
		ni_cg_clockgating_default(rdev);
	if (btc_dpm_enabled(rdev))
		return -EINVAL;
	if (pi->mg_clock_gating)
		ni_mg_clockgating_default(rdev);
	if (eg_pi->ls_clock_gating)

@@ -3991,7 +3989,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
	union pplib_clock_info *clock_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	struct ni_ps *ps;
@@ -235,8 +235,8 @@ int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

@@ -1490,7 +1490,7 @@ static int r600_mc_init(struct radeon_device *rdev)
				rdev->fastfb_working = true;
			}
		}
	}

	radeon_update_bandwidth_info(rdev);

@@ -4574,7 +4574,7 @@ uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
@@ -1671,8 +1671,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);

@@ -1712,8 +1712,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

@@ -1764,8 +1764,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
		ib[idx+2] = upper_32_bits(offset) & 0xff;

@@ -1876,8 +1876,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
			return -EINVAL;
		}
		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffff8;
		ib[idx+2] = upper_32_bits(offset) & 0xff;

@@ -1898,8 +1898,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
		}

		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -844,7 +844,7 @@ int r600_get_platform_caps(struct radeon_device *rdev)
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,

@@ -874,7 +874,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

@@ -1070,7 +1070,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
@@ -215,7 +215,7 @@ void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
 * build a HDMI Video Info Frame
 */
void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
			 unsigned char *buffer, size_t size)
{
	uint8_t *frame = buffer + 3;

@@ -312,7 +312,7 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
}

void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
			     struct radeon_crtc *crtc, unsigned int clock)
{
	struct radeon_encoder *radeon_encoder;
	struct radeon_encoder_atom_dig *dig;
@@ -2095,7 +2095,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
	struct radeon_i2c_bus_rec i2c_bus;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,

@@ -2575,7 +2575,7 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
	bool valid;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,

@@ -2666,7 +2666,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
	bool valid;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
@@ -1161,9 +1161,9 @@ static void radeon_check_arguments(struct radeon_device *rdev)
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);

@@ -1901,7 +1901,7 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
@@ -407,7 +407,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
	unsigned repcnt = 4;
	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];

	down_read(&rdev->exclusive_lock);
	if (work->fence) {
		struct radeon_fence *fence;

@@ -919,7 +919,7 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
	*den /= tmp;

	/* make sure nominator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;

@@ -959,7 +959,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}

@@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
	/* setup afmt */
	radeon_afmt_init(rdev);

-	if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
-		radeon_fbdev_init(rdev);
-		drm_kms_helper_poll_init(rdev->ddev);
-	}
+	radeon_fbdev_init(rdev);
+	drm_kms_helper_poll_init(rdev->ddev);

	/* do pm late init */
	ret = radeon_pm_late_init(rdev);
@@ -38,9 +38,9 @@
#include <linux/vga_switcheroo.h>

/* object hierarchy -
-   this contains a helper + a radeon fb
-   the helper contains a pointer to radeon framebuffer baseclass.
-*/
+ * this contains a helper + a radeon fb
+ * the helper contains a pointer to radeon framebuffer baseclass.
+ */
struct radeon_fbdev {
	struct drm_fb_helper helper;
	struct radeon_framebuffer rfb;

@@ -292,7 +292,8 @@ out_unref:

void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
-	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}

static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)

@@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
	int bpp_sel = 32;
	int ret;

+	/* don't enable fbdev if no connectors */
+	if (list_empty(&rdev->ddev->mode_config.connector_list))
+		return 0;
+
	/* select 8 bpp console on RN50 or 16MB cards */
	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

@@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)

void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
-	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+	if (rdev->mode_info.rfbdev)
+		fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}

bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
+	if (!rdev->mode_info.rfbdev)
+		return false;
+
	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
		return true;
	return false;

@@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)

void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
-	drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}

void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
-	drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}

void radeon_fbdev_restore_mode(struct radeon_device *rdev)
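Editor's note: the hunks above show the pattern these changes rely on — radeon_fbdev_init() now returns early when the connector list is empty, so rdev->mode_info.rfbdev may legitimately be NULL and every helper that touches it checks first. The following is a minimal standalone C sketch of that guard pattern, not driver code; the demo_* names are hypothetical.

/*
 * Sketch: an optional sub-object is only created when needed, and every
 * entry point that dereferences it guards against NULL first.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_fbdev { int suspended; };

struct demo_device {
	int num_connectors;
	struct demo_fbdev *fbdev;	/* stays NULL when no connectors exist */
};

static int demo_fbdev_init(struct demo_device *dev)
{
	if (dev->num_connectors == 0)
		return 0;	/* nothing to do, leave dev->fbdev NULL */
	dev->fbdev = calloc(1, sizeof(*dev->fbdev));
	return dev->fbdev ? 0 : -1;
}

static void demo_fbdev_set_suspend(struct demo_device *dev, int state)
{
	if (dev->fbdev)		/* guard: fbdev may never have been created */
		dev->fbdev->suspended = state;
}

int main(void)
{
	struct demo_device dev = { .num_connectors = 0, .fbdev = NULL };

	demo_fbdev_init(&dev);
	demo_fbdev_set_suspend(&dev, 1);	/* safe no-op without connectors */
	printf("fbdev %s\n", dev.fbdev ? "present" : "absent");
	return 0;
}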
@@ -274,7 +274,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
		if (i == RADEON_RING_TYPE_GFX_INDEX) {
			/* oh, oh, that's really bad */
			DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
			rdev->accel_working = false;
			return r;

		} else {

@@ -304,7 +304,7 @@ static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif
@@ -818,52 +818,52 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
	tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
		~(RADEON_TMDS_TRANSMITTER_PLLRST);

	if (rdev->family == CHIP_R200 ||
	    rdev->family == CHIP_R100 ||
	    ASIC_IS_R300(rdev))
		tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
	else /* RV chips got this bit reversed */
		tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;

	fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
		       (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
			RADEON_FP_CRTC_DONT_SHADOW_HEND));

	fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);

	fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
			 RADEON_FP_DFP_SYNC_SEL |
			 RADEON_FP_CRT_SYNC_SEL |
			 RADEON_FP_CRTC_LOCK_8DOT |
			 RADEON_FP_USE_SHADOW_EN |
			 RADEON_FP_CRTC_USE_SHADOW_VEND |
			 RADEON_FP_CRT_SYNC_ALT);

	if (1) /* FIXME rgbBits == 8 */
		fp_gen_cntl |= RADEON_FP_PANEL_FORMAT;  /* 24 bit format */
	else
		fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT; /* 18 bit format */

	if (radeon_crtc->crtc_id == 0) {
		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
			if (radeon_encoder->rmx_type != RMX_OFF)
				fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
			else
				fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
		} else
			fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
	} else {
		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
			fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
		} else
			fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
	}

	WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
	WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);

	if (rdev->is_atom_bios)
		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
@@ -214,8 +214,8 @@ int radeon_bo_create(struct radeon_device *rdev,
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */

@@ -848,7 +848,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;
@@ -79,7 +79,7 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
			radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		mutex_unlock(&rdev->pm.mutex);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
@@ -56,7 +56,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
}

bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
				  struct radeon_semaphore *semaphore)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

@@ -73,7 +73,7 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
}

bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
				struct radeon_semaphore *semaphore)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
@@ -722,9 +722,11 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
		return r;
}

-/* multiple fence commands without any stream commands in between can
-   crash the vcpu so just try to emmit a dummy create/destroy msg to
-   avoid this */
+/*
+ * multiple fence commands without any stream commands in between can
+ * crash the vcpu so just try to emmit a dummy create/destroy msg to
+ * avoid this
+ */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
@@ -166,7 +166,7 @@ int radeon_vce_init(struct radeon_device *rdev)
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		atomic_set(&rdev->vce.handles[i], 0);
		rdev->vce.filp[i] = NULL;
	}

	return 0;
}

@@ -389,7 +389,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}

	if (fence)

@@ -446,7 +446,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}

	if (fence)

@@ -769,18 +769,18 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
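Editor's note: radeon_vce_ring_test() above uses the driver's usual poll-until-timeout idiom — busy-wait in 1 us steps until the ring's read pointer moves, then decide success or -ETIMEDOUT by whether the loop ran to completion. A self-contained C sketch of the idiom follows; the hardware read is faked and all names are hypothetical.

/*
 * Sketch: poll a condition with a bounded busy-wait loop. After the loop,
 * i < TIMEOUT means the condition became true before the budget ran out.
 */
#include <stdio.h>
#include <errno.h>

#define USEC_TIMEOUT 100

static int fake_rptr;	/* stands in for a hardware read-pointer register */

static int read_rptr(void) { return fake_rptr++; }	/* pretend HW advances */
static void udelay(int usecs) { (void)usecs; }		/* no-op stand-in */

int main(void)
{
	int rptr = 5;	/* value the ring pointer must move away from */
	int i, r = 0;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (read_rptr() != rptr)
			break;		/* hardware consumed the commands */
		udelay(1);
	}

	if (i < USEC_TIMEOUT)
		printf("ring test succeeded in %d usecs\n", i);
	else
		r = -ETIMEDOUT;	/* loop exhausted its budget */

	return r;
}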
@@ -611,15 +611,16 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
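Editor's note: radeon_vm_page_flags() above is a straight bitmask translation from driver-level page flags to hardware PTE bits, with the snooped bit only meaningful for system pages. The standalone sketch below mirrors that shape; the flag values are made up for the demo and are not the driver's real bit assignments.

/* Sketch: map abstract flags onto hardware bits, one conditional per bit. */
#include <stdint.h>
#include <stdio.h>

#define VM_PAGE_VALID     (1 << 0)
#define VM_PAGE_READABLE  (1 << 1)
#define VM_PAGE_WRITEABLE (1 << 2)
#define VM_PAGE_SYSTEM    (1 << 3)
#define VM_PAGE_SNOOPED   (1 << 4)

#define PTE_VALID     (1 << 0)
#define PTE_SYSTEM    (1 << 1)
#define PTE_SNOOPED   (1 << 2)
#define PTE_READABLE  (1 << 5)
#define PTE_WRITEABLE (1 << 6)

static uint32_t vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & VM_PAGE_VALID) ? PTE_VALID : 0;
	hw_flags |= (flags & VM_PAGE_READABLE) ? PTE_READABLE : 0;
	hw_flags |= (flags & VM_PAGE_WRITEABLE) ? PTE_WRITEABLE : 0;
	if (flags & VM_PAGE_SYSTEM) {
		hw_flags |= PTE_SYSTEM;
		/* snooping is only a property of system (GART) pages */
		hw_flags |= (flags & VM_PAGE_SNOOPED) ? PTE_SNOOPED : 0;
	}
	return hw_flags;
}

int main(void)
{
	printf("0x%x\n", vm_page_flags(VM_PAGE_VALID | VM_PAGE_SYSTEM | VM_PAGE_SNOOPED));
	return 0;
}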
@@ -795,7 +795,7 @@ static int rs780_parse_power_table(struct radeon_device *rdev)
	union pplib_clock_info *clock_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	struct igp_ps *ps;
@@ -209,7 +209,7 @@ static struct rv6xx_sclk_stepping rv6xx_next_vco_step(struct radeon_device *rdev

static bool rv6xx_can_step_post_div(struct radeon_device *rdev,
				    struct rv6xx_sclk_stepping *cur,
				    struct rv6xx_sclk_stepping *target)
{
	return (cur->post_divider > target->post_divider) &&
		((cur->vco_frequency * target->post_divider) <=

@@ -239,7 +239,7 @@ static bool rv6xx_reached_stepping_target(struct radeon_device *rdev,

static void rv6xx_generate_steps(struct radeon_device *rdev,
				 u32 low, u32 high,
				 u32 start_index, u8 *end_index)
{
	struct rv6xx_sclk_stepping cur;
	struct rv6xx_sclk_stepping target;

@@ -1356,23 +1356,23 @@ static void rv6xx_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
	enum radeon_dpm_event_src dpm_event_src;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;

	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;

	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

@@ -1879,7 +1879,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
	union pplib_clock_info *clock_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	struct rv6xx_ps *ps;
@@ -36,28 +36,28 @@ u32 rv740_get_decoded_reference_divider(u32 encoded_ref)
	u32 ref = 0;

	switch (encoded_ref) {
	case 0:
		ref = 1;
		break;
	case 16:
		ref = 2;
		break;
	case 17:
		ref = 3;
		break;
	case 18:
		ref = 2;
		break;
	case 19:
		ref = 3;
		break;
	case 20:
		ref = 4;
		break;
	case 21:
		ref = 5;
		break;
	default:
		DRM_ERROR("Invalid encoded Reference Divider\n");
		ref = 0;
		break;
@@ -345,27 +345,27 @@ static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv)
	int ret = 0;

	switch (postdiv) {
	case 1:
		*encoded_postdiv = 0;
		break;
	case 2:
		*encoded_postdiv = 1;
		break;
	case 4:
		*encoded_postdiv = 2;
		break;
	case 8:
		*encoded_postdiv = 3;
		break;
	case 16:
		*encoded_postdiv = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)

@@ -1175,15 +1175,15 @@ static int rv770_init_smc_table(struct radeon_device *rdev,
	rv770_populate_smc_mvdd_table(rdev, table);

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

@@ -1567,18 +1567,18 @@ void rv770_reset_smio_status(struct radeon_device *rdev)
	sw_smio_index =
		(RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT;
	switch (sw_smio_index) {
	case 3:
		vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL);
		break;
	case 2:
		vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL);
		break;
	case 1:
		vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL);
		break;
	case 0:
		return;
	default:
		vid_smio_cntl = pi->s0_vid_lower_smio_cntl;
		break;
	}

@@ -1817,21 +1817,21 @@ static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
	enum radeon_dpm_event_src dpm_event_src;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;

	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;

	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;

@@ -2273,7 +2273,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
	union pplib_clock_info *clock_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	struct rv7xx_ps *ps;
File diff suppressed because it is too large
@ -499,7 +499,7 @@ static const struct si_cac_config_reg lcac_pitcairn[] =
|
|||
|
||||
static const struct si_cac_config_reg cac_override_pitcairn[] =
|
||||
{
|
||||
{ 0xFFFFFFFF }
|
||||
{ 0xFFFFFFFF }
|
||||
};
|
||||
|
||||
static const struct si_powertune_data powertune_data_pitcairn =
|
||||
|
@ -991,7 +991,7 @@ static const struct si_cac_config_reg lcac_cape_verde[] =
|
|||
|
||||
static const struct si_cac_config_reg cac_override_cape_verde[] =
|
||||
{
|
||||
{ 0xFFFFFFFF }
|
||||
{ 0xFFFFFFFF }
|
||||
};
|
||||
|
||||
static const struct si_powertune_data powertune_data_cape_verde =
|
||||
|
@@ -1762,9 +1762,9 @@ static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev);
 
 static struct si_power_info *si_get_pi(struct radeon_device *rdev)
 {
-        struct si_power_info *pi = rdev->pm.dpm.priv;
+	struct si_power_info *pi = rdev->pm.dpm.priv;
 
-        return pi;
+	return pi;
 }
 
 static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
@@ -3150,9 +3150,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 		}
 	}
 
-        for (i = 0; i < ps->performance_level_count; i++)
-                btc_adjust_clock_combinations(rdev, max_limits,
-                                              &ps->performance_levels[i]);
+	for (i = 0; i < ps->performance_level_count; i++)
+		btc_adjust_clock_combinations(rdev, max_limits,
+					      &ps->performance_levels[i]);
 
 	for (i = 0; i < ps->performance_level_count; i++) {
 		if (ps->performance_levels[i].vddc < min_vce_voltage)
@@ -3291,7 +3291,7 @@ static void si_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
 	case 0:
 	default:
 		want_thermal_protection = false;
-                break;
+		break;
 	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
 		want_thermal_protection = true;
 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
@@ -3493,7 +3493,7 @@ static int si_process_firmware_header(struct radeon_device *rdev)
 	if (ret)
 		return ret;
 
-        si_pi->state_table_start = tmp;
+	si_pi->state_table_start = tmp;
 
 	ret = si_read_smc_sram_dword(rdev,
 				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
@@ -3652,7 +3652,7 @@ static void si_program_response_times(struct radeon_device *rdev)
 	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
 
 	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
-        backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
+	backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
 
 	if (voltage_response_time == 0)
 		voltage_response_time = 1000;
@@ -3760,7 +3760,7 @@ static void si_setup_bsp(struct radeon_device *rdev)
 			   &pi->pbsu);
 
 
-        pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
 	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
 
 	WREG32(CG_BSP, pi->dsp);
@@ -4308,7 +4308,7 @@ static int si_populate_memory_timing_parameters(struct radeon_device *rdev,
 
 	radeon_atom_set_engine_dram_timings(rdev,
 					    pl->sclk,
-                pl->mclk);
+					    pl->mclk);
 
 	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -4343,7 +4343,7 @@ static int si_do_program_memory_timing_parameters(struct radeon_device *rdev,
 					   si_pi->sram_end);
 		if (ret)
 			break;
-        }
+	}
 
 	return ret;
 }
@@ -4821,9 +4821,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
 	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
 
-        spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
-        spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
-        spll_func_cntl_3 |= SPLL_DITHEN;
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
 
 	if (pi->sclk_ss) {
 		struct radeon_atom_ss ss;
@@ -4930,15 +4930,15 @@ static int si_populate_mclk_value(struct radeon_device *rdev,
 		tmp = freq_nom / reference_clock;
 		tmp = tmp * tmp;
 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
-                        ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
 			u32 clks = reference_clock * 5 / ss.rate;
 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
 
-                        mpll_ss1 &= ~CLKV_MASK;
-                        mpll_ss1 |= CLKV(clkv);
+			mpll_ss1 &= ~CLKV_MASK;
+			mpll_ss1 |= CLKV(clkv);
 
-                        mpll_ss2 &= ~CLKS_MASK;
-                        mpll_ss2 |= CLKS(clks);
+			mpll_ss2 &= ~CLKS_MASK;
+			mpll_ss2 |= CLKS(clks);
 		}
 	}
 
@@ -5265,7 +5265,7 @@ static int si_convert_power_state_to_smc(struct radeon_device *rdev,
 		ni_pi->enable_power_containment = false;
 
 	ret = si_populate_sq_ramping_values(rdev, radeon_state, smc_state);
-        if (ret)
+	if (ret)
 		ni_pi->enable_sq_ramping = false;
 
 	return si_populate_smc_t(rdev, radeon_state, smc_state);
@@ -5436,46 +5436,46 @@ static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
 	case MC_SEQ_RAS_TIMING >> 2:
 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_CAS_TIMING >> 2:
+	case MC_SEQ_CAS_TIMING >> 2:
 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_MISC_TIMING >> 2:
+	case MC_SEQ_MISC_TIMING >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_MISC_TIMING2 >> 2:
+	case MC_SEQ_MISC_TIMING2 >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
 		break;
-        case MC_SEQ_RD_CTL_D0 >> 2:
+	case MC_SEQ_RD_CTL_D0 >> 2:
 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
 		break;
-        case MC_SEQ_RD_CTL_D1 >> 2:
+	case MC_SEQ_RD_CTL_D1 >> 2:
 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_D0 >> 2:
+	case MC_SEQ_WR_CTL_D0 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_D1 >> 2:
+	case MC_SEQ_WR_CTL_D1 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
 		break;
-        case MC_PMG_CMD_EMRS >> 2:
+	case MC_PMG_CMD_EMRS >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS >> 2:
+	case MC_PMG_CMD_MRS >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS1 >> 2:
+	case MC_PMG_CMD_MRS1 >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
 		break;
-        case MC_SEQ_PMG_TIMING >> 2:
+	case MC_SEQ_PMG_TIMING >> 2:
 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS2 >> 2:
+	case MC_PMG_CMD_MRS2 >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_2 >> 2:
+	case MC_SEQ_WR_CTL_2 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
 		break;
-        default:
+	default:
 		result = false;
 		break;
 	}
@@ -5562,19 +5562,19 @@ static int si_initialize_mc_reg_table(struct radeon_device *rdev)
 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
 
-        ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
-        if (ret)
-                goto init_mc_done;
+	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+	if (ret)
+		goto init_mc_done;
 
-        ret = si_copy_vbios_mc_reg_table(table, si_table);
-        if (ret)
-                goto init_mc_done;
+	ret = si_copy_vbios_mc_reg_table(table, si_table);
+	if (ret)
+		goto init_mc_done;
 
 	si_set_s0_mc_reg_index(si_table);
 
 	ret = si_set_mc_special_registers(rdev, si_table);
-        if (ret)
-                goto init_mc_done;
+	if (ret)
+		goto init_mc_done;
 
 	si_set_valid_flag(si_table);
 
@@ -5715,10 +5715,10 @@ static int si_upload_mc_reg_table(struct radeon_device *rdev,
 
 static void si_enable_voltage_control(struct radeon_device *rdev, bool enable)
 {
-        if (enable)
-                WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
-        else
-                WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
 }
 
 static enum radeon_pcie_gen si_get_maximum_link_speed(struct radeon_device *rdev,
@@ -6820,7 +6820,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
 	struct _NonClockInfoArray *non_clock_info_array;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 	struct ni_ps *ps;
@@ -787,8 +787,8 @@ static void sumo_program_acpi_power_level(struct radeon_device *rdev)
 	struct atom_clock_dividers dividers;
 	int ret;
 
-        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
-                                             pi->acpi_pl.sclk,
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     pi->acpi_pl.sclk,
 					     false, &dividers);
 	if (ret)
 		return;
@@ -1462,7 +1462,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
 	struct _NonClockInfoArray *non_clock_info_array;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 	struct sumo_ps *ps;
@@ -369,8 +369,8 @@ static void trinity_gfx_powergating_initialize(struct radeon_device *rdev)
 	int ret;
 	u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
 
-        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
-                                             25000, false, &dividers);
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     25000, false, &dividers);
 	if (ret)
 		return;
 
@@ -587,8 +587,8 @@ static void trinity_set_divider_value(struct radeon_device *rdev,
 	u32 value;
 	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
 
-        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
-                                             sclk, false, &dividers);
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     sclk, false, &dividers);
 	if (ret)
 		return;
 
@@ -597,8 +597,8 @@ static void trinity_set_divider_value(struct radeon_device *rdev,
 	value |= CLK_DIVIDER(dividers.post_div);
 	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
 
-        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
-                                             sclk/2, false, &dividers);
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     sclk/2, false, &dividers);
 	if (ret)
 		return;
 
@@ -1045,14 +1045,14 @@ static int trinity_set_thermal_temperature_range(struct radeon_device *rdev,
 	int low_temp = 0 * 1000;
 	int high_temp = 255 * 1000;
 
-        if (low_temp < min_temp)
+	if (low_temp < min_temp)
 		low_temp = min_temp;
-        if (high_temp > max_temp)
+	if (high_temp > max_temp)
 		high_temp = max_temp;
-        if (high_temp < low_temp) {
+	if (high_temp < low_temp) {
 		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
-                return -EINVAL;
-        }
+		return -EINVAL;
+	}
 
 	WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
 	WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
@@ -1737,7 +1737,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
 	struct _NonClockInfoArray *non_clock_info_array;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 	struct sumo_ps *ps;
@@ -53,7 +53,7 @@ static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated)
 		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
 
 		WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
-        } else {
+	} else {
 		tmp = RREG32(VCE_CLOCK_GATING_B);
 		tmp |= 0xe7;
 		tmp &= ~0xe70000;