Merge branch 'drm-next-4.10' of git://people.freedesktop.org/~agd5f/linux into drm-next
- lots of code cleanup
- lots of bug fixes
- expose rpm based fan info via hwmon
- lots of clock and powergating fixes
- SI register header cleanup and conversion to common format used by newer asics

* 'drm-next-4.10' of git://people.freedesktop.org/~agd5f/linux: (54 commits)
  drm/amdgpu: drop is_display_hung from display funcs
  drm/amdgpu/uvd: reduce IB parsing overhead on UVD5+ (v2)
  drm/amdgpu/uvd: consolidate code for fetching addr from ctx
  drm/amdgpu: Disable DPM in virtualization
  drm/amdgpu: use AMDGPU_GEM_CREATE_VRAM_CLEARED for VM PD/PTs (v2)
  drm/amdgpu: improve AMDGPU_GEM_CREATE_VRAM_CLEARED handling (v2)
  drm/amdgpu: fix error handling in amdgpu_bo_create_restricted
  drm/amdgpu: fix amdgpu_fill_buffer (v2)
  drm/amdgpu: remove amdgpu_irq_get_delayed
  amdgpu: Wrap dev_err() calls on vm faults with printk_ratelimit()
  amdgpu: Use dev_err() over vanilla printk() in vm_decode_fault()
  drm/amd/amdgpu: port of DCE v6 to new headers (v3)
  drm/amdgpu: cleanup unused iterator members for sdma v2.4
  drm/amdgpu: cleanup unused iterator members for sdma v3
  drm/amdgpu:impl vgt_flush for VI(V5)
  drm/amdgpu: enable uvd mgcg for Fiji.
  drm/amdgpu: refine cz uvd clock gate logic.
  drm/amdgpu: change log level to KERN_INFO in ci_dpm.c
  drm/amdgpu: always un-gate UVD REGS path.
  drm/amdgpu/sdma: fix typo in packet setup
  ...
commit 17f1dfd01c
@@ -92,13 +92,13 @@ extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
extern int amdgpu_powercontainment;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
@@ -1633,7 +1633,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
@@ -70,7 +70,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
return false;
}
adev->bios = kmalloc(size, GFP_KERNEL);
if (adev->bios == NULL) {
if (!adev->bios) {
iounmap(bios);
return false;
}
@@ -841,16 +841,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
return amdgpu_cs_sync_rings(p);
}

static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
{
if (r == -EDEADLK) {
r = amdgpu_gpu_reset(adev);
if (!r)
r = -EAGAIN;
}
return r;
}

static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
struct amdgpu_cs_parser *parser)
{
@@ -1054,29 +1044,29 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
amdgpu_cs_parser_fini(&parser, r, false);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
goto out;
}

r = amdgpu_cs_parser_bos(&parser, data);
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r && r != -ERESTARTSYS)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
else if (!r) {
reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, &parser);
}

if (!r) {
r = amdgpu_cs_dependencies(adev, &parser);
if (r)
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
if (r) {
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
goto out;
}

reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, &parser);
if (r)
goto out;

r = amdgpu_cs_dependencies(adev, &parser);
if (r) {
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
goto out;
}

for (i = 0; i < parser.job->num_ibs; i++)
trace_amdgpu_cs(&parser, i);
@@ -1088,7 +1078,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
@@ -1017,8 +1017,8 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_block_size = 9;
}

if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) ||
!amdgpu_check_pot_argument(amdgpu_vram_page_split)) {
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
@@ -52,6 +52,8 @@ enum amdgpu_dpm_event_src {
AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define SCLK_DEEP_SLEEP_MASK 0x8

struct amdgpu_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
@@ -317,6 +319,11 @@ struct amdgpu_dpm_funcs {
(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
-EINVAL)

#define amdgpu_dpm_get_sclk(adev, l) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
@@ -91,8 +91,8 @@ int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
int amdgpu_powercontainment = 1;
int amdgpu_sclk_deep_sleep_en = 1;
int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
@@ -182,14 +182,14 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);

MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);

MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);

MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
module_param_named(no_evict, amdgpu_no_evict, int, 0444);

MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);

MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
@@ -382,24 +382,27 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
if (!ring->fence_drv.fences)
return -ENOMEM;

timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
if (timeout == 0) {
/*
* FIXME:
* Delayed workqueue cannot use it directly,
* so the scheduler will not use delayed workqueue if
* MAX_SCHEDULE_TIMEOUT is set.
* Currently keep it simple and silly.
*/
timeout = MAX_SCHEDULE_TIMEOUT;
}
r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission,
timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
return r;
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
if (timeout == 0) {
/*
* FIXME:
* Delayed workqueue cannot use it directly,
* so the scheduler will not use delayed workqueue if
* MAX_SCHEDULE_TIMEOUT is set.
* Currently keep it simple and silly.
*/
timeout = MAX_SCHEDULE_TIMEOUT;
}
r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission,
timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
return r;
}
}

return 0;
@@ -424,15 +424,6 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
return 0;
}

bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type)
{
if ((type >= src->num_types) || !src->enabled_types)
return false;
return atomic_inc_return(&src->enabled_types[type]) == 1;
}

/**
* amdgpu_irq_put - disable interrupt
*
@@ -88,9 +88,6 @@ int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
@@ -271,8 +271,6 @@ struct amdgpu_display_funcs {
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
/* wait for vblank */
void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
/* is dce hung */
bool (*is_display_hung)(struct amdgpu_device *adev);
/* set backlight level */
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
u8 level);
@@ -128,17 +128,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;

if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
adev->mc.visible_vram_size < adev->mc.real_vram_size) {
places[c].fpfn = visible_pfn;
places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_TOPDOWN;
c++;
}

places[c].fpfn = 0;
places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
@@ -382,39 +371,36 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */

if (!resv) {
bool locked;

reservation_object_init(&bo->tbo.ttm_resv);
locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
WARN_ON(!locked);
}
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0)) {
acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
&amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;
}

if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;

if (adev->mman.buffer_funcs_ring == NULL ||
!adev->mman.buffer_funcs_ring->ready) {
r = -EBUSY;
goto fail_free;
}

r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
goto fail_free;

amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
if (unlikely(r))
goto fail_unreserve;

amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
amdgpu_bo_fence(bo, fence, false);
amdgpu_bo_unreserve(bo);
dma_fence_put(bo->tbo.moving);
bo->tbo.moving = dma_fence_get(fence);
dma_fence_put(fence);
}
if (!resv)
ww_mutex_unlock(&bo->tbo.resv->lock);
*bo_ptr = bo;

trace_amdgpu_bo_create(bo);
@@ -422,8 +408,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
return 0;

fail_unreserve:
amdgpu_bo_unreserve(bo);
fail_free:
ww_mutex_unlock(&bo->tbo.resv->lock);
amdgpu_bo_unref(&bo);
return r;
}
@@ -737,6 +737,21 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return sprintf(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 speed;

err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err)
return err;

return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@@ -744,6 +759,7 @@ static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);

static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -753,6 +769,7 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
NULL
};
@@ -804,6 +821,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;

/* requires powerplay */
if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
return 0;

return effective_mode;
}
@@ -155,9 +155,6 @@ static int amdgpu_pp_sw_init(void *handle)
ret = adev->powerplay.ip_funcs->sw_init(
adev->powerplay.pp_handle);

if (adev->pp_enabled)
adev->pm.dpm_enabled = true;

return ret;
}
@@ -187,6 +184,9 @@ static int amdgpu_pp_hw_init(void *handle)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);

if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
adev->pm.dpm_enabled = true;

return ret;
}
@@ -44,7 +44,8 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_COMPUTE,
AMDGPU_RING_TYPE_SDMA,
AMDGPU_RING_TYPE_UVD,
AMDGPU_RING_TYPE_VCE
AMDGPU_RING_TYPE_VCE,
AMDGPU_RING_TYPE_KIQ
};

struct amdgpu_device;
@@ -327,9 +327,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
return -EINVAL;

*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
if ((*sa_bo) == NULL) {
if (!(*sa_bo))
return -ENOMEM;
}
(*sa_bo)->manager = sa_manager;
(*sa_bo)->fence = NULL;
INIT_LIST_HEAD(&(*sa_bo)->olist);
@ -1382,28 +1382,40 @@ error_free:
|
|||
}
|
||||
|
||||
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
||||
uint32_t src_data,
|
||||
struct reservation_object *resv,
|
||||
struct dma_fence **fence)
|
||||
uint32_t src_data,
|
||||
struct reservation_object *resv,
|
||||
struct dma_fence **fence)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
struct amdgpu_job *job;
|
||||
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
|
||||
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
|
||||
|
||||
uint32_t max_bytes, byte_count;
|
||||
uint64_t dst_offset;
|
||||
struct drm_mm_node *mm_node;
|
||||
unsigned long num_pages;
|
||||
unsigned int num_loops, num_dw;
|
||||
unsigned int i;
|
||||
|
||||
struct amdgpu_job *job;
|
||||
int r;
|
||||
|
||||
byte_count = bo->tbo.num_pages << PAGE_SHIFT;
|
||||
max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
|
||||
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
|
||||
if (!ring->ready) {
|
||||
DRM_ERROR("Trying to clear memory with ring turned off.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
num_pages = bo->tbo.num_pages;
|
||||
mm_node = bo->tbo.mem.mm_node;
|
||||
num_loops = 0;
|
||||
while (num_pages) {
|
||||
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
|
||||
|
||||
num_loops += DIV_ROUND_UP(byte_count, max_bytes);
|
||||
num_pages -= mm_node->size;
|
||||
++mm_node;
|
||||
}
|
||||
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
|
||||
|
||||
/* for IB padding */
|
||||
while (num_dw & 0x7)
|
||||
num_dw++;
|
||||
num_dw += 64;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
|
||||
if (r)
|
||||
|
@ -1411,28 +1423,43 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
|||
|
||||
if (resv) {
|
||||
r = amdgpu_sync_resv(adev, &job->sync, resv,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED);
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED);
|
||||
if (r) {
|
||||
DRM_ERROR("sync failed (%d).\n", r);
|
||||
goto error_free;
|
||||
}
|
||||
}
|
||||
|
||||
dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
|
||||
for (i = 0; i < num_loops; i++) {
|
||||
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
|
||||
num_pages = bo->tbo.num_pages;
|
||||
mm_node = bo->tbo.mem.mm_node;
|
||||
|
||||
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
|
||||
dst_offset, cur_size_in_bytes);
|
||||
while (num_pages) {
|
||||
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
|
||||
uint64_t dst_addr;
|
||||
|
||||
dst_offset += cur_size_in_bytes;
|
||||
byte_count -= cur_size_in_bytes;
|
||||
r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
|
||||
&bo->tbo.mem, &dst_addr);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
while (byte_count) {
|
||||
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
|
||||
|
||||
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
|
||||
dst_addr, cur_size_in_bytes);
|
||||
|
||||
dst_addr += cur_size_in_bytes;
|
||||
byte_count -= cur_size_in_bytes;
|
||||
}
|
||||
|
||||
num_pages -= mm_node->size;
|
||||
++mm_node;
|
||||
}
|
||||
|
||||
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
|
||||
WARN_ON(job->ibs[0].length_dw > num_dw);
|
||||
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
|
|
|
@ -360,6 +360,18 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
|
|||
}
|
||||
}
|
||||
|
||||
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
|
||||
{
|
||||
uint32_t lo, hi;
|
||||
uint64_t addr;
|
||||
|
||||
lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
|
||||
hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
|
||||
addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_uvd_cs_pass1 - first parsing round
|
||||
*
|
||||
|
@ -372,14 +384,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
|
|||
{
|
||||
struct amdgpu_bo_va_mapping *mapping;
|
||||
struct amdgpu_bo *bo;
|
||||
uint32_t cmd, lo, hi;
|
||||
uint64_t addr;
|
||||
uint32_t cmd;
|
||||
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
|
||||
int r = 0;
|
||||
|
||||
lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
|
||||
hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
|
||||
addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
|
||||
|
||||
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
|
||||
if (mapping == NULL) {
|
||||
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
|
||||
|
@ -698,18 +706,16 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
|
|||
{
|
||||
struct amdgpu_bo_va_mapping *mapping;
|
||||
struct amdgpu_bo *bo;
|
||||
uint32_t cmd, lo, hi;
|
||||
uint32_t cmd;
|
||||
uint64_t start, end;
|
||||
uint64_t addr;
|
||||
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
|
||||
int r;
|
||||
|
||||
lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
|
||||
hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
|
||||
addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
|
||||
|
||||
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
|
||||
if (mapping == NULL)
|
||||
if (mapping == NULL) {
|
||||
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
start = amdgpu_bo_gpu_offset(bo);
|
||||
|
||||
|
@ -893,10 +899,13 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
|
|||
ctx.buf_sizes = buf_sizes;
|
||||
ctx.ib_idx = ib_idx;
|
||||
|
||||
/* first round, make sure the buffers are actually in the UVD segment */
|
||||
r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
|
||||
if (r)
|
||||
return r;
|
||||
/* first round only required on chips without UVD 64 bit address support */
|
||||
if (!parser->adev->uvd.address_64_bit) {
|
||||
/* first round, make sure the buffers are actually in the UVD segment */
|
||||
r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
/* second round, patch buffer addresses into the command stream */
|
||||
r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
|
||||
|
|
|
@ -529,70 +529,6 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
|
|||
amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_clear_bo - initially clear the page dir/table
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @bo: bo to clear
|
||||
*
|
||||
* need to reserve bo first before calling it.
|
||||
*/
|
||||
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_bo *bo)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
struct dma_fence *fence = NULL;
|
||||
struct amdgpu_job *job;
|
||||
struct amdgpu_pte_update_params params;
|
||||
unsigned entries;
|
||||
uint64_t addr;
|
||||
int r;
|
||||
|
||||
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
|
||||
|
||||
r = reservation_object_reserve_shared(bo->tbo.resv);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
|
||||
if (r)
|
||||
goto error;
|
||||
|
||||
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
|
||||
if (r)
|
||||
goto error;
|
||||
|
||||
addr = amdgpu_bo_gpu_offset(bo);
|
||||
entries = amdgpu_bo_size(bo) / 8;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(adev, 64, &job);
|
||||
if (r)
|
||||
goto error;
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
params.adev = adev;
|
||||
params.ib = &job->ibs[0];
|
||||
amdgpu_vm_do_set_ptes(¶ms, addr, 0, entries, 0, 0);
|
||||
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
|
||||
|
||||
WARN_ON(job->ibs[0].length_dw > 64);
|
||||
r = amdgpu_job_submit(job, ring, &vm->entity,
|
||||
AMDGPU_FENCE_OWNER_VM, &fence);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
amdgpu_bo_fence(bo, fence, true);
|
||||
dma_fence_put(fence);
|
||||
return 0;
|
||||
|
||||
error_free:
|
||||
amdgpu_job_free(job);
|
||||
|
||||
error:
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_map_gart - Resolve gart mapping of addr
|
||||
*
|
||||
|
@ -1435,7 +1371,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
|||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
|
||||
AMDGPU_GEM_CREATE_SHADOW |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
|
||||
AMDGPU_GEM_CREATE_VRAM_CLEARED,
|
||||
NULL, resv, &pt);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
@ -1445,22 +1382,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
|||
*/
|
||||
pt->parent = amdgpu_bo_ref(vm->page_directory);
|
||||
|
||||
r = amdgpu_vm_clear_bo(adev, vm, pt);
|
||||
if (r) {
|
||||
amdgpu_bo_unref(&pt->shadow);
|
||||
amdgpu_bo_unref(&pt);
|
||||
goto error_free;
|
||||
}
|
||||
|
||||
if (pt->shadow) {
|
||||
r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
|
||||
if (r) {
|
||||
amdgpu_bo_unref(&pt->shadow);
|
||||
amdgpu_bo_unref(&pt);
|
||||
goto error_free;
|
||||
}
|
||||
}
|
||||
|
||||
vm->page_tables[pt_idx].bo = pt;
|
||||
vm->page_tables[pt_idx].addr = 0;
|
||||
}
|
||||
|
@ -1642,7 +1563,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
|
||||
AMDGPU_GEM_CREATE_SHADOW |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
|
||||
AMDGPU_GEM_CREATE_VRAM_CLEARED,
|
||||
NULL, NULL, &vm->page_directory);
|
||||
if (r)
|
||||
goto error_free_sched_entity;
|
||||
|
@ -1651,24 +1573,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|||
if (r)
|
||||
goto error_free_page_directory;
|
||||
|
||||
r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
|
||||
if (r)
|
||||
goto error_unreserve;
|
||||
|
||||
if (vm->page_directory->shadow) {
|
||||
r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
|
||||
if (r)
|
||||
goto error_unreserve;
|
||||
}
|
||||
|
||||
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
|
||||
amdgpu_bo_unreserve(vm->page_directory);
|
||||
|
||||
return 0;
|
||||
|
||||
error_unreserve:
|
||||
amdgpu_bo_unreserve(vm->page_directory);
|
||||
|
||||
error_free_page_directory:
|
||||
amdgpu_bo_unref(&vm->page_directory->shadow);
|
||||
amdgpu_bo_unref(&vm->page_directory);
|
||||
|
|
|
@ -108,7 +108,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
|
|||
lpfn = man->size;
|
||||
|
||||
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
|
||||
amdgpu_vram_page_split == -1) {
|
||||
place->lpfn || amdgpu_vram_page_split == -1) {
|
||||
pages_per_node = ~0ul;
|
||||
num_nodes = 1;
|
||||
} else {
|
||||
|
|
|
@ -4202,11 +4202,6 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
|
|||
|
||||
if (!gate) {
|
||||
/* turn the clocks on when decoding */
|
||||
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (pi->caps_uvd_dpm ||
|
||||
(adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
|
||||
pi->smc_state_table.UvdBootLevel = 0;
|
||||
|
@ -4223,9 +4218,6 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
|
|||
ret = ci_enable_uvd_dpm(adev, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -5896,7 +5888,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
|
|||
pi->pcie_dpm_key_disabled = 0;
|
||||
pi->thermal_sclk_dpm_enabled = 0;
|
||||
|
||||
if (amdgpu_sclk_deep_sleep_en)
|
||||
if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
|
||||
pi->caps_sclk_ds = true;
|
||||
else
|
||||
pi->caps_sclk_ds = false;
|
||||
|
@ -5999,7 +5991,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
|
|||
tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
|
||||
DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
|
||||
break;
|
||||
}
|
||||
WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
|
||||
|
|
|
@ -438,7 +438,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
|
|||
pi->caps_td_ramping = true;
|
||||
pi->caps_tcp_ramping = true;
|
||||
}
|
||||
if (amdgpu_sclk_deep_sleep_en)
|
||||
if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
|
||||
pi->caps_sclk_ds = true;
|
||||
else
|
||||
pi->caps_sclk_ds = false;
|
||||
|
@ -2111,9 +2111,8 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
|
|||
|
||||
if (gate) {
|
||||
if (pi->caps_uvd_pg) {
|
||||
/* disable clockgating so we can properly shut down the block */
|
||||
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
AMD_CG_STATE_GATE);
|
||||
if (ret) {
|
||||
DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
|
||||
return;
|
||||
|
@ -2159,9 +2158,8 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
|
|||
return;
|
||||
}
|
||||
|
||||
/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
|
||||
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
AMD_CG_STATE_UNGATE);
|
||||
if (ret) {
|
||||
DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
|
||||
return;
|
||||
|
|
|
@ -3749,7 +3749,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
|
|||
.bandwidth_update = &dce_v10_0_bandwidth_update,
|
||||
.vblank_get_counter = &dce_v10_0_vblank_get_counter,
|
||||
.vblank_wait = &dce_v10_0_vblank_wait,
|
||||
.is_display_hung = &dce_v10_0_is_display_hung,
|
||||
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
|
||||
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
|
||||
.hpd_sense = &dce_v10_0_hpd_sense,
|
||||
|
|
|
@ -3805,7 +3805,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
|
|||
.bandwidth_update = &dce_v11_0_bandwidth_update,
|
||||
.vblank_get_counter = &dce_v11_0_vblank_get_counter,
|
||||
.vblank_wait = &dce_v11_0_vblank_wait,
|
||||
.is_display_hung = &dce_v11_0_is_display_hung,
|
||||
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
|
||||
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
|
||||
.hpd_sense = &dce_v11_0_hpd_sense,
|
||||
|
|
|
@ -30,8 +30,19 @@
|
|||
#include "atombios_encoders.h"
|
||||
#include "amdgpu_pll.h"
|
||||
#include "amdgpu_connectors.h"
|
||||
#include "si/si_reg.h"
|
||||
#include "si/sid.h"
|
||||
|
||||
#include "bif/bif_3_0_d.h"
|
||||
#include "bif/bif_3_0_sh_mask.h"
|
||||
#include "oss/oss_1_0_d.h"
|
||||
#include "oss/oss_1_0_sh_mask.h"
|
||||
#include "gca/gfx_6_0_d.h"
|
||||
#include "gca/gfx_6_0_sh_mask.h"
|
||||
#include "gmc/gmc_6_0_d.h"
|
||||
#include "gmc/gmc_6_0_sh_mask.h"
|
||||
#include "dce/dce_6_0_d.h"
|
||||
#include "dce/dce_6_0_sh_mask.h"
|
||||
#include "gca/gfx_7_2_enum.h"
|
||||
#include "si_enums.h"
|
||||
|
||||
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
|
||||
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
|
||||
|
@ -48,12 +59,12 @@ static const u32 crtc_offsets[6] =
|
|||
|
||||
static const u32 hpd_offsets[] =
|
||||
{
|
||||
DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS,
|
||||
DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS,
|
||||
DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS,
|
||||
DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS,
|
||||
DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS,
|
||||
DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS,
|
||||
mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
|
||||
mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
|
||||
mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
|
||||
mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
|
||||
mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
|
||||
mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
|
||||
};
|
||||
|
||||
static const uint32_t dig_offsets[] = {
|
||||
|
@ -73,32 +84,32 @@ static const struct {
|
|||
uint32_t hpd;
|
||||
|
||||
} interrupt_status_offsets[6] = { {
|
||||
.reg = DISP_INTERRUPT_STATUS,
|
||||
.reg = mmDISP_INTERRUPT_STATUS,
|
||||
.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
|
||||
.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
|
||||
.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
|
||||
}, {
|
||||
.reg = DISP_INTERRUPT_STATUS_CONTINUE,
|
||||
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
|
||||
.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
|
||||
.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
|
||||
.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
|
||||
}, {
|
||||
.reg = DISP_INTERRUPT_STATUS_CONTINUE2,
|
||||
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
|
||||
.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
|
||||
.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
|
||||
.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
|
||||
}, {
|
||||
.reg = DISP_INTERRUPT_STATUS_CONTINUE3,
|
||||
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
|
||||
.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
|
||||
.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
|
||||
.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
|
||||
}, {
|
||||
.reg = DISP_INTERRUPT_STATUS_CONTINUE4,
|
||||
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
|
||||
.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
|
||||
.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
|
||||
.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
|
||||
}, {
|
||||
.reg = DISP_INTERRUPT_STATUS_CONTINUE5,
|
||||
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
|
||||
.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
|
||||
.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
|
||||
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
|
||||
|
@ -119,7 +130,7 @@ static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
|
|||
|
||||
static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
|
||||
{
|
||||
if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
|
||||
if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
|
@ -129,8 +140,8 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
|
|||
{
|
||||
u32 pos1, pos2;
|
||||
|
||||
pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
|
||||
pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
|
||||
pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
|
||||
pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
|
||||
|
||||
if (pos1 != pos2)
|
||||
return true;
|
||||
|
@ -152,7 +163,7 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
|
|||
if (crtc >= adev->mode_info.num_crtc)
|
||||
return;
|
||||
|
||||
if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
|
||||
if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
|
||||
return;
|
||||
|
||||
/* depending on when we hit vblank, we may be close to active; if so,
|
||||
|
@ -180,7 +191,7 @@ static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
|
|||
if (crtc >= adev->mode_info.num_crtc)
|
||||
return 0;
|
||||
else
|
||||
return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
|
||||
return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
|
||||
}
|
||||
|
||||
static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
|
||||
|
@ -220,16 +231,16 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
|
|||
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
|
||||
|
||||
/* flip at hsync for async, default is vsync */
|
||||
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
|
||||
EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
|
||||
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
|
||||
GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
|
||||
/* update the scanout addresses */
|
||||
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
upper_32_bits(crtc_base));
|
||||
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
(u32)crtc_base);
|
||||
|
||||
/* post the write */
|
||||
RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
|
||||
RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
|
||||
}
|
||||
|
||||
static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
|
||||
|
@ -237,8 +248,8 @@ static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
|
|||
{
|
||||
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
|
||||
return -EINVAL;
|
||||
*vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
|
||||
*position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
|
||||
*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
|
||||
*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -261,7 +272,7 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
|
|||
if (hpd >= adev->mode_info.num_hpd)
|
||||
return connected;
|
||||
|
||||
if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE)
|
||||
if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
|
||||
connected = true;
|
||||
|
||||
return connected;
|
||||
|
@ -284,12 +295,12 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
|
|||
if (hpd >= adev->mode_info.num_hpd)
|
||||
return;
|
||||
|
||||
tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
|
||||
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
|
||||
if (connected)
|
||||
tmp &= ~DC_HPDx_INT_POLARITY;
|
||||
tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
|
||||
else
|
||||
tmp |= DC_HPDx_INT_POLARITY;
|
||||
WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
|
||||
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
|
||||
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -312,9 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
|
|||
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
|
||||
continue;
|
||||
|
||||
tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
|
||||
tmp |= DC_HPDx_EN;
|
||||
WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
|
||||
tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
|
||||
tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
|
||||
WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
|
||||
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
|
||||
|
@ -323,9 +334,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
|
|||
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
|
||||
* also avoid interrupt storms during dpms.
|
||||
*/
|
||||
tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
|
||||
tmp &= ~DC_HPDx_INT_EN;
|
||||
WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
|
||||
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
|
||||
tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
|
||||
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -355,9 +366,9 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
|
|||
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
|
||||
continue;
|
||||
|
||||
tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
|
||||
tmp &= ~DC_HPDx_EN;
|
||||
WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
|
||||
tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
|
||||
tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
|
||||
WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
|
||||
|
||||
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
|
||||
}
|
||||
|
@ -365,14 +376,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
|
|||
|
||||
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
|
||||
{
|
||||
return SI_DC_GPIO_HPD_A;
|
||||
}
|
||||
|
||||
static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
|
||||
{
|
||||
DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");
|
||||
|
||||
return true;
|
||||
return mmDC_GPIO_HPD_A;
|
||||
}
|
||||
|
||||
static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
|
||||
|
@ -380,7 +384,7 @@ static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
|
|||
if (crtc >= adev->mode_info.num_crtc)
|
||||
return 0;
|
||||
else
|
||||
return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
|
||||
return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
|
||||
}
|
||||
|
||||
static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
|
||||
|
@ -389,25 +393,25 @@ static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
|
|||
u32 crtc_enabled, tmp, frame_count;
|
||||
int i, j;
|
||||
|
||||
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
|
||||
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
|
||||
save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
|
||||
save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
|
||||
|
||||
/* disable VGA render */
|
||||
WREG32(VGA_RENDER_CONTROL, 0);
|
||||
WREG32(mmVGA_RENDER_CONTROL, 0);
|
||||
|
||||
/* blank the display controllers */
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++) {
|
||||
crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
|
||||
crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
|
||||
if (crtc_enabled) {
|
||||
save->crtc_enabled[i] = true;
|
||||
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
|
||||
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
|
||||
|
||||
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
|
||||
if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
|
||||
dce_v6_0_vblank_wait(adev, i);
|
||||
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
|
||||
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
|
||||
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
|
||||
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
|
||||
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
|
||||
tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
|
||||
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
|
||||
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
|
||||
}
|
||||
/* wait for the next frame */
|
||||
frame_count = evergreen_get_vblank_counter(adev, i);
|
||||
|
@ -418,11 +422,11 @@ static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
|
|||
}
|
||||
|
||||
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
|
||||
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
|
||||
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
|
||||
tmp &= ~EVERGREEN_CRTC_MASTER_EN;
|
||||
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
|
||||
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
|
||||
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
|
||||
tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
|
||||
tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
|
||||
WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
|
||||
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
|
||||
save->crtc_enabled[i] = false;
|
||||
/* ***** */
|
||||
} else {
|
||||
|
@ -439,41 +443,41 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
|
|||
|
||||
/* update crtc base addresses */
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++) {
|
||||
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
|
||||
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
|
||||
upper_32_bits(adev->mc.vram_start));
|
||||
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
|
||||
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
|
||||
upper_32_bits(adev->mc.vram_start));
|
||||
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
|
||||
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
|
||||
(u32)adev->mc.vram_start);
|
||||
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
|
||||
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
|
||||
(u32)adev->mc.vram_start);
|
||||
}
|
||||
|
||||
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
|
||||
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
|
||||
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
|
||||
WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
|
||||
|
||||
/* unlock regs and wait for update */
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++) {
|
||||
if (save->crtc_enabled[i]) {
|
||||
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
|
||||
tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
|
||||
if ((tmp & 0x7) != 3) {
|
||||
tmp &= ~0x7;
|
||||
tmp |= 0x3;
|
||||
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
|
||||
WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
|
||||
}
|
||||
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
|
||||
if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
|
||||
tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
|
||||
WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
|
||||
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
|
||||
if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
|
||||
tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
|
||||
WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
|
||||
}
|
||||
tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
|
||||
tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
|
||||
if (tmp & 1) {
|
||||
tmp &= ~1;
|
||||
WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
|
||||
WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
|
||||
}
|
||||
for (j = 0; j < adev->usec_timeout; j++) {
|
||||
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
|
||||
if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
|
||||
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
|
||||
if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
|
@ -481,9 +485,9 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
|
|||
}
|
||||
|
||||
/* Unlock vga access */
|
||||
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
|
||||
WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
|
||||
mdelay(1);
|
||||
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
|
||||
WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
|
||||
|
||||
}
|
||||
|
||||
|
@ -491,8 +495,8 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
|
|||
bool render)
|
||||
{
|
||||
if (!render)
|
||||
WREG32(R_000300_VGA_RENDER_CONTROL,
|
||||
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
|
||||
WREG32(mmVGA_RENDER_CONTROL,
|
||||
RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
|
||||
|
||||
}
|
||||
|
||||
|
@ -526,14 +530,14 @@ void dce_v6_0_disable_dce(struct amdgpu_device *adev)
|
|||
|
||||
/*Disable crtc*/
|
||||
for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
|
||||
crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) &
|
||||
EVERGREEN_CRTC_MASTER_EN;
|
||||
crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
|
||||
CRTC_CONTROL__CRTC_MASTER_EN_MASK;
|
||||
if (crtc_enabled) {
|
||||
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
|
||||
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
|
||||
tmp &= ~EVERGREEN_CRTC_MASTER_EN;
|
||||
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
|
||||
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
|
||||
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
|
||||
tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
|
||||
tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
|
||||
WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
|
||||
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -569,19 +573,23 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
|
|||
case 6:
|
||||
if (dither == AMDGPU_FMT_DITHER_ENABLE)
|
||||
/* XXX sort out optimal dither settings */
|
||||
tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
|
||||
FMT_SPATIAL_DITHER_EN);
|
||||
tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
|
||||
FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
|
||||
FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
|
||||
else
|
||||
tmp |= FMT_TRUNCATE_EN;
|
||||
tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
|
||||
break;
|
||||
case 8:
|
||||
if (dither == AMDGPU_FMT_DITHER_ENABLE)
|
||||
/* XXX sort out optimal dither settings */
|
||||
tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
|
||||
FMT_RGB_RANDOM_ENABLE |
|
||||
FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
|
||||
tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
|
||||
FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
|
||||
FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
|
||||
FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
|
||||
FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
|
||||
else
|
||||
tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
|
||||
tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
|
||||
FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
|
||||
break;
|
||||
case 10:
|
||||
default:
|
||||
|
@ -589,7 +597,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
|
|||
break;
|
||||
}
|
||||
|
||||
WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
|
||||
WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -603,7 +611,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
|
|||
*/
|
||||
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 tmp = RREG32(MC_SHARED_CHMAP);
|
||||
u32 tmp = RREG32(mmMC_SHARED_CHMAP);
|
||||
|
||||
switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
|
||||
case 0:
|
||||
|
@ -1100,28 +1108,28 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
|
|||
}
|
||||
|
||||
/* select wm A */
|
||||
arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
|
||||
arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
|
||||
tmp = arb_control3;
|
||||
tmp &= ~LATENCY_WATERMARK_MASK(3);
|
||||
tmp |= LATENCY_WATERMARK_MASK(1);
|
||||
WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
|
||||
WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(LATENCY_LOW_WATERMARK(latency_watermark_a) |
|
||||
LATENCY_HIGH_WATERMARK(line_time)));
|
||||
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
|
||||
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
|
||||
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
|
||||
/* select wm B */
|
||||
tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
|
||||
tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
|
||||
tmp &= ~LATENCY_WATERMARK_MASK(3);
|
||||
tmp |= LATENCY_WATERMARK_MASK(2);
|
||||
WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
|
||||
WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(LATENCY_LOW_WATERMARK(latency_watermark_b) |
|
||||
LATENCY_HIGH_WATERMARK(line_time)));
|
||||
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
|
||||
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
|
||||
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
|
||||
/* restore original selection */
|
||||
WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
|
||||
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
|
||||
|
||||
/* write the priority marks */
|
||||
WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
|
||||
WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
|
||||
WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
|
||||
WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
|
||||
|
||||
/* save values for DPM */
|
||||
amdgpu_crtc->line_time = line_time;
|
||||
|
@ -1139,7 +1147,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
|
|||
/*
|
||||
* Line Buffer Setup
|
||||
* There are 3 line buffers, each one shared by 2 display controllers.
|
||||
* DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
|
||||
* mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
|
||||
* the display controllers. The paritioning is done via one of four
|
||||
* preset allocations specified in bits 21:20:
|
||||
* 0 - half lb
|
||||
|
@ -1162,14 +1170,14 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
|
|||
buffer_alloc = 0;
|
||||
}
|
||||
|
||||
WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
|
||||
DC_LB_MEMORY_CONFIG(tmp));
|
||||
|
||||
WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
|
||||
DMIF_BUFFERS_ALLOCATED(buffer_alloc));
|
||||
WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
|
||||
(buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
|
||||
DMIF_BUFFERS_ALLOCATED_COMPLETED)
|
||||
if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
|
||||
PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
|
@@ -1411,12 +1419,12 @@ static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
 
 static const u32 vga_control_regs[6] =
 {
-	AVIVO_D1VGA_CONTROL,
-	AVIVO_D2VGA_CONTROL,
-	EVERGREEN_D3VGA_CONTROL,
-	EVERGREEN_D4VGA_CONTROL,
-	EVERGREEN_D5VGA_CONTROL,
-	EVERGREEN_D6VGA_CONTROL,
+	mmD1VGA_CONTROL,
+	mmD2VGA_CONTROL,
+	mmD3VGA_CONTROL,
+	mmD4VGA_CONTROL,
+	mmD5VGA_CONTROL,
+	mmD6VGA_CONTROL,
 };
 
 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
@@ -1436,7 +1444,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 
-	WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
+	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
 }
 
 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
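Aside for readers following the header conversion: before and after the rename the addressing scheme is identical, a block-relative register index (now the generated mm* define) plus a per-instance crtc_offset. The stand-alone C sketch below mimics that scheme over a simulated register file; the index value, the offset table, and the helper names are invented for illustration and are not the real DCE6 definitions.

#include <stdint.h>
#include <stdio.h>

/* Simulated MMIO space (dword-indexed), standing in for the GPU register BAR. */
static uint32_t mmio[0x10000];

static void wreg32(uint32_t reg, uint32_t val) { mmio[reg] = val; }
static uint32_t rreg32(uint32_t reg)           { return mmio[reg]; }

/* Illustrative register index and per-CRTC offsets; NOT the real DCE6 values. */
#define mmGRPH_ENABLE_EXAMPLE 0x1a00
static const uint32_t crtc_offsets_example[6] = {
	0x0000, 0x0200, 0x0400, 0x0600, 0x0800, 0x0a00,
};

/* Enable or disable the graphics plane of one CRTC instance. */
static void grph_enable_example(int crtc, int enable)
{
	wreg32(mmGRPH_ENABLE_EXAMPLE + crtc_offsets_example[crtc], enable ? 1 : 0);
}

int main(void)
{
	grph_enable_example(0, 1);
	grph_enable_example(3, 0);
	printf("CRTC0 GRPH_ENABLE=%u CRTC3 GRPH_ENABLE=%u\n",
	       rreg32(mmGRPH_ENABLE_EXAMPLE + crtc_offsets_example[0]),
	       rreg32(mmGRPH_ENABLE_EXAMPLE + crtc_offsets_example[3]));
	return 0;
}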
@ -1452,7 +1460,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
struct amdgpu_bo *abo;
|
||||
uint64_t fb_location, tiling_flags;
|
||||
uint32_t fb_format, fb_pitch_pixels, pipe_config;
|
||||
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
|
||||
u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
|
||||
u32 viewport_w, viewport_h;
|
||||
int r;
|
||||
bool bypass_lut = false;
|
||||
|
@ -1496,64 +1504,64 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
|
||||
switch (target_fb->pixel_format) {
|
||||
case DRM_FORMAT_C8:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
|
||||
fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
|
||||
GRPH_FORMAT(GRPH_FORMAT_INDEXED));
|
||||
break;
|
||||
case DRM_FORMAT_XRGB4444:
|
||||
case DRM_FORMAT_ARGB4444:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
|
||||
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
|
||||
GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
|
||||
#ifdef __BIG_ENDIAN
|
||||
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
|
||||
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
|
||||
#endif
|
||||
break;
|
||||
case DRM_FORMAT_XRGB1555:
|
||||
case DRM_FORMAT_ARGB1555:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
|
||||
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
|
||||
GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
|
||||
#ifdef __BIG_ENDIAN
|
||||
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
|
||||
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
|
||||
#endif
|
||||
break;
|
||||
case DRM_FORMAT_BGRX5551:
|
||||
case DRM_FORMAT_BGRA5551:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
|
||||
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
|
||||
GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
|
||||
#ifdef __BIG_ENDIAN
|
||||
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
|
||||
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
|
||||
#endif
|
||||
break;
|
||||
case DRM_FORMAT_RGB565:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
|
||||
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
|
||||
GRPH_FORMAT(GRPH_FORMAT_ARGB565));
|
||||
#ifdef __BIG_ENDIAN
|
||||
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
|
||||
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
|
||||
#endif
|
||||
break;
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
|
||||
fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
|
||||
GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
|
||||
#ifdef __BIG_ENDIAN
|
||||
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
|
||||
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
|
||||
#endif
|
||||
break;
|
||||
case DRM_FORMAT_XRGB2101010:
|
||||
case DRM_FORMAT_ARGB2101010:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
|
||||
fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
|
||||
GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
|
||||
#ifdef __BIG_ENDIAN
|
||||
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
|
||||
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
|
||||
#endif
|
||||
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
|
||||
bypass_lut = true;
|
||||
break;
|
||||
case DRM_FORMAT_BGRX1010102:
|
||||
case DRM_FORMAT_BGRA1010102:
|
||||
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
|
||||
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
|
||||
fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
|
||||
GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
|
||||
#ifdef __BIG_ENDIAN
|
||||
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
|
||||
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
|
||||
#endif
|
||||
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
|
||||
bypass_lut = true;
|
||||
|
@ -1573,75 +1581,75 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
|
||||
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
|
||||
|
||||
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
|
||||
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
|
||||
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
|
||||
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
|
||||
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
|
||||
fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
|
||||
fb_format |= GRPH_NUM_BANKS(num_banks);
|
||||
fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
|
||||
fb_format |= GRPH_TILE_SPLIT(tile_split);
|
||||
fb_format |= GRPH_BANK_WIDTH(bankw);
|
||||
fb_format |= GRPH_BANK_HEIGHT(bankh);
|
||||
fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
|
||||
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
|
||||
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
|
||||
fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
|
||||
}
|
||||
|
||||
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
|
||||
fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
|
||||
fb_format |= GRPH_PIPE_CONFIG(pipe_config);
|
||||
|
||||
dce_v6_0_vga_enable(crtc, false);
|
||||
|
||||
/* Make sure surface address is updated at vertical blank rather than
|
||||
* horizontal blank
|
||||
*/
|
||||
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
|
||||
|
||||
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
upper_32_bits(fb_location));
|
||||
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
upper_32_bits(fb_location));
|
||||
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
(u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
|
||||
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
(u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
|
||||
WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
|
||||
WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
|
||||
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
(u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
|
||||
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
(u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
|
||||
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
|
||||
WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
|
||||
|
||||
/*
|
||||
* The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
|
||||
* for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
|
||||
* retain the full precision throughout the pipeline.
|
||||
*/
|
||||
WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
|
||||
~EVERGREEN_LUT_10BIT_BYPASS_EN);
|
||||
WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
|
||||
(bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
|
||||
~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
|
||||
|
||||
if (bypass_lut)
|
||||
DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
|
||||
|
||||
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
|
||||
WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
|
||||
WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
|
||||
WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
|
||||
|
||||
fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
|
||||
WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
|
||||
WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
|
||||
|
||||
dce_v6_0_grph_enable(crtc, true);
|
||||
|
||||
WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
|
||||
target_fb->height);
|
||||
x &= ~3;
|
||||
y &= ~1;
|
||||
WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
|
||||
(x << 16) | y);
|
||||
viewport_w = crtc->mode.hdisplay;
|
||||
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
|
||||
|
||||
WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
|
||||
(viewport_w << 16) | viewport_h);
|
||||
|
||||
/* set pageflip to happen anywhere in vblank interval */
|
||||
WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
|
||||
|
||||
if (!atomic && fb && fb != crtc->primary->fb) {
|
||||
amdgpu_fb = to_amdgpu_framebuffer(fb);
|
||||
|
@ -1668,10 +1676,10 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
|
|||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
|
||||
WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
|
||||
EVERGREEN_INTERLEAVE_EN);
|
||||
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
|
||||
INTERLEAVE_EN);
|
||||
else
|
||||
WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
|
||||
}
|
||||
|
||||
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
|
||||
|
@ -1684,54 +1692,52 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
|
|||
|
||||
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
|
||||
|
||||
WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
|
||||
NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
|
||||
WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
NI_GRPH_PRESCALE_BYPASS);
|
||||
WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
NI_OVL_PRESCALE_BYPASS);
|
||||
WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
|
||||
NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
|
||||
WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
|
||||
(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
|
||||
WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
|
||||
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
|
||||
WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
|
||||
(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
|
||||
|
||||
WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
|
||||
|
||||
WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
|
||||
WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
|
||||
WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
|
||||
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
|
||||
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
|
||||
|
||||
WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
|
||||
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
|
||||
for (i = 0; i < 256; i++) {
|
||||
WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
|
||||
(amdgpu_crtc->lut_r[i] << 20) |
|
||||
(amdgpu_crtc->lut_g[i] << 10) |
|
||||
(amdgpu_crtc->lut_b[i] << 0));
|
||||
}
|
||||
|
||||
WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
|
||||
NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
|
||||
NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
|
||||
NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
|
||||
WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
|
||||
NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
|
||||
WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
|
||||
NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
|
||||
WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(NI_OUTPUT_CSC_GRPH_MODE(0) |
|
||||
NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
|
||||
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
|
||||
(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
|
||||
ICON_DEGAMMA_MODE(0) |
|
||||
(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
|
||||
WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
|
||||
(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
|
||||
WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
|
||||
(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
|
||||
WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
|
||||
(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
|
||||
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
|
||||
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
|
||||
|
||||
|
@@ -1810,12 +1816,12 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	uint32_t cur_lock;
 
-	cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
 	if (lock)
-		cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
 	else
-		cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
-	WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
+	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
 }
 
 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
@ -1823,9 +1829,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
|
|||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
struct amdgpu_device *adev = crtc->dev->dev_private;
|
||||
|
||||
WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
|
||||
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
|
||||
WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
|
||||
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
|
||||
|
||||
|
||||
}
|
||||
|
@ -1835,15 +1841,15 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
|
|||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
struct amdgpu_device *adev = crtc->dev->dev_private;
|
||||
|
||||
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
upper_32_bits(amdgpu_crtc->cursor_addr));
|
||||
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
lower_32_bits(amdgpu_crtc->cursor_addr));
|
||||
|
||||
WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
EVERGREEN_CURSOR_EN |
|
||||
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
|
||||
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
|
||||
WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
CUR_CONTROL__CURSOR_EN_MASK |
|
||||
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
|
||||
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
|
||||
|
||||
}
|
||||
|
||||
|
@ -1870,9 +1876,9 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
|
|||
y = 0;
|
||||
}
|
||||
|
||||
WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
|
||||
WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
|
||||
WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
|
||||
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
|
||||
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
|
||||
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
|
||||
((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
|
||||
|
||||
amdgpu_crtc->cursor_x = x;
|
||||
|
@ -2478,14 +2484,14 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
|
|||
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
interrupt_mask = RREG32(INT_MASK + reg_block);
|
||||
interrupt_mask = RREG32(mmINT_MASK + reg_block);
|
||||
interrupt_mask &= ~VBLANK_INT_MASK;
|
||||
WREG32(INT_MASK + reg_block, interrupt_mask);
|
||||
WREG32(mmINT_MASK + reg_block, interrupt_mask);
|
||||
break;
|
||||
case AMDGPU_IRQ_STATE_ENABLE:
|
||||
interrupt_mask = RREG32(INT_MASK + reg_block);
|
||||
interrupt_mask = RREG32(mmINT_MASK + reg_block);
|
||||
interrupt_mask |= VBLANK_INT_MASK;
|
||||
WREG32(INT_MASK + reg_block, interrupt_mask);
|
||||
WREG32(mmINT_MASK + reg_block, interrupt_mask);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -2513,14 +2519,14 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
|
|||
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
|
||||
dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
|
||||
dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
|
||||
WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
|
||||
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
|
||||
break;
|
||||
case AMDGPU_IRQ_STATE_ENABLE:
|
||||
dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
|
||||
dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
|
||||
dc_hpd_int_cntl |= DC_HPDx_INT_EN;
|
||||
WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
|
||||
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -2588,7 +2594,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
|
|||
switch (entry->src_data) {
|
||||
case 0: /* vblank */
|
||||
if (disp_int & interrupt_status_offsets[crtc].vblank)
|
||||
WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
|
||||
WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
|
||||
else
|
||||
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
|
||||
|
||||
|
@ -2599,7 +2605,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
|
|||
break;
|
||||
case 1: /* vline */
|
||||
if (disp_int & interrupt_status_offsets[crtc].vline)
|
||||
WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
|
||||
WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
|
||||
else
|
||||
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
|
||||
|
||||
|
@ -2625,12 +2631,12 @@ static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
|
||||
reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
|
||||
if (state == AMDGPU_IRQ_STATE_DISABLE)
|
||||
WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
|
||||
WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
|
||||
reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
|
||||
else
|
||||
WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
|
||||
WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
|
||||
reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
|
||||
|
||||
return 0;
|
||||
|
@ -2653,9 +2659,9 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
|
||||
if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
|
||||
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
|
||||
WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
|
||||
WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
|
||||
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
|
||||
|
||||
/* IRQ could occur when in initial stage */
|
||||
|
@ -2706,9 +2712,9 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
|
|||
mask = interrupt_status_offsets[hpd].hpd;
|
||||
|
||||
if (disp_int & mask) {
|
||||
tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
|
||||
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
|
||||
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
|
||||
WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
|
||||
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
|
||||
schedule_work(&adev->hotplug_work);
|
||||
DRM_INFO("IH: HPD%d\n", hpd + 1);
|
||||
}
|
||||
|
@@ -3024,7 +3030,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
 	.bandwidth_update = &dce_v6_0_bandwidth_update,
 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
 	.vblank_wait = &dce_v6_0_vblank_wait,
-	.is_display_hung = &dce_v6_0_is_display_hung,
 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
 	.hpd_sense = &dce_v6_0_hpd_sense,
@@ -3586,7 +3586,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
 	.bandwidth_update = &dce_v8_0_bandwidth_update,
 	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
 	.vblank_wait = &dce_v8_0_vblank_wait,
-	.is_display_hung = &dce_v8_0_is_display_hung,
 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
 	.hpd_sense = &dce_v8_0_hpd_sense,
@@ -95,11 +95,6 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
 	return 0;
 }
 
-static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
-{
-	return false;
-}
-
 static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 				       struct amdgpu_mode_mc_save *save)
 {
@@ -691,7 +686,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
 	.bandwidth_update = &dce_virtual_bandwidth_update,
 	.vblank_get_counter = &dce_virtual_vblank_get_counter,
 	.vblank_wait = &dce_virtual_vblank_wait,
-	.is_display_hung = &dce_virtual_is_display_hung,
 	.backlight_set_level = NULL,
 	.backlight_get_level = NULL,
 	.hpd_sense = &dce_virtual_hpd_sense,
File diff suppressed because it is too large
@@ -2105,6 +2105,18 @@ static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0x20); /* poll interval */
 }
 
+static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
+		EVENT_INDEX(4));
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
+		EVENT_INDEX(0));
+}
+
+
 /**
  * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
  *
@@ -2260,6 +2272,7 @@ static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 
 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+		gfx_v7_0_ring_emit_vgt_flush(ring);
 		/* set load_global_config & load_global_uconfig */
 		dw2 |= 0x8001;
 		/* set load_cs_sh_regs */
@@ -4359,7 +4372,11 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 
 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
 {
-	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+	WREG32(mmSQ_IND_INDEX,
+		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+		(address << SQ_IND_INDEX__INDEX__SHIFT) |
+		(SQ_IND_INDEX__FORCE_READ_MASK));
 	return RREG32(mmSQ_IND_DATA);
 }
 
@@ -5149,7 +5166,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
 		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
 		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-		3, /* gfx_v7_ring_emit_cntxcntl */
+		3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
 	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
@@ -3904,7 +3904,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
 	int list_size;
 	unsigned int *register_list_format =
 		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
-	if (register_list_format == NULL)
+	if (!register_list_format)
 		return -ENOMEM;
 	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
 		adev->gfx.rlc.reg_list_format_size_bytes);
@@ -5442,7 +5442,11 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 
 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
 {
-	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+	WREG32(mmSQ_IND_INDEX,
+		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+		(address << SQ_IND_INDEX__INDEX__SHIFT) |
+		(SQ_IND_INDEX__FORCE_READ_MASK));
 	return RREG32(mmSQ_IND_DATA);
 }
 
@@ -6182,6 +6186,18 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0x20); /* poll interval */
 }
 
+static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
+		EVENT_INDEX(4));
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
+		EVENT_INDEX(0));
+}
+
+
 static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -6367,6 +6383,7 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 
 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+		gfx_v8_0_ring_emit_vgt_flush(ring);
 		/* set load_global_config & load_global_uconfig */
 		dw2 |= 0x8001;
 		/* set load_cs_sh_regs */
@@ -6570,7 +6587,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
 		128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
 		2 + /* gfx_v8_ring_emit_sb */
-		3, /* gfx_v8_ring_emit_cntxcntl */
+		3 + 4, /* gfx_v8_ring_emit_cntxcntl including vgt flush */
 	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
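The wave_read_ind() hunks above are representative of the wider cleanup: the same SQ_IND_INDEX dword is still programmed, but it is now composed from named *__SHIFT/*_MASK constants instead of open-coded magic numbers, so the field layout is self-documenting and greppable. A stand-alone C sketch of that pattern follows; the field positions and names below are invented for the example and do not reflect the real SQ_IND_INDEX layout from the generated headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout for an index-style register; values are made up. */
#define EX_INDEX__WAVE_ID__SHIFT  0
#define EX_INDEX__WAVE_ID_MASK    0x0000000f
#define EX_INDEX__SIMD_ID__SHIFT  4
#define EX_INDEX__SIMD_ID_MASK    0x00000030
#define EX_INDEX__FORCE_READ_MASK 0x00002000
#define EX_INDEX__INDEX__SHIFT    16
#define EX_INDEX__INDEX_MASK      0xffff0000

static uint32_t pack_index(uint32_t wave, uint32_t simd, uint32_t address)
{
	/* Compose the dword from named fields instead of open-coded magic shifts. */
	return (wave << EX_INDEX__WAVE_ID__SHIFT) |
	       (simd << EX_INDEX__SIMD_ID__SHIFT) |
	       (address << EX_INDEX__INDEX__SHIFT) |
	       EX_INDEX__FORCE_READ_MASK;
}

int main(void)
{
	uint32_t v = pack_index(2, 1, 0x12);

	/* Decode the fields back out to show the round trip. */
	printf("raw=0x%08x wave=%u simd=%u index=0x%x force_read=%u\n", v,
	       (v & EX_INDEX__WAVE_ID_MASK) >> EX_INDEX__WAVE_ID__SHIFT,
	       (v & EX_INDEX__SIMD_ID_MASK) >> EX_INDEX__SIMD_ID__SHIFT,
	       (v & EX_INDEX__INDEX_MASK) >> EX_INDEX__INDEX__SHIFT,
	       !!(v & EX_INDEX__FORCE_READ_MASK));
	return 0;
}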
@@ -1,4 +1,3 @@
-
 /*
  * Copyright 2014 Advanced Micro Devices, Inc.
  *
@@ -26,7 +25,16 @@
 #include "amdgpu.h"
 #include "gmc_v6_0.h"
 #include "amdgpu_ucode.h"
-#include "si/sid.h"
+
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "si_enums.h"
 
 static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -37,6 +45,16 @@ MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
 
+#define MC_SEQ_MISC0__MT__MASK   0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
+#define MC_SEQ_MISC0__MT__DDR2   0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
+#define MC_SEQ_MISC0__MT__HBM    0x60000000
+#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
+
+
 static const u32 crtc_offsets[6] =
 {
 	SI_CRTC0_REGISTER_OFFSET,
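The MC_SEQ_MISC0__MT__* values added above encode the memory type in the top nibble of MC_SEQ_MISC0; further down in this file the masked value feeds gmc_v6_0_convert_vram_type(), which is not shown in this hunk. Below is a stand-alone user-space sketch of the decode, reusing the mask values from the hunk but mapping to illustrative strings rather than the driver's AMDGPU_VRAM_TYPE_* enums.

#include <stdint.h>
#include <stdio.h>

/* Memory-type field of MC_SEQ_MISC0, values as in the hunk above. */
#define MC_SEQ_MISC0__MT__MASK   0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
#define MC_SEQ_MISC0__MT__DDR2   0x20000000
#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
#define MC_SEQ_MISC0__MT__HBM    0x60000000
#define MC_SEQ_MISC0__MT__DDR3   0xB0000000

static const char *vram_type_name(uint32_t mc_seq_misc0)
{
	switch (mc_seq_misc0 & MC_SEQ_MISC0__MT__MASK) {
	case MC_SEQ_MISC0__MT__GDDR1: return "GDDR1";
	case MC_SEQ_MISC0__MT__DDR2:  return "DDR2";
	case MC_SEQ_MISC0__MT__GDDR3: return "GDDR3";
	case MC_SEQ_MISC0__MT__GDDR4: return "GDDR4";
	case MC_SEQ_MISC0__MT__GDDR5: return "GDDR5";
	case MC_SEQ_MISC0__MT__HBM:   return "HBM";
	case MC_SEQ_MISC0__MT__DDR3:  return "DDR3";
	default:                      return "unknown";
	}
}

int main(void)
{
	/* Pretend MC_SEQ_MISC0 read back with the GDDR5 memory type bits set. */
	uint32_t misc0 = MC_SEQ_MISC0__MT__GDDR5 | 0x00abc123;

	printf("vram type: %s\n", vram_type_name(misc0));
	return 0;
}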
@ -57,14 +75,14 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
|
|||
|
||||
gmc_v6_0_wait_for_idle((void *)adev);
|
||||
|
||||
blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
|
||||
if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
|
||||
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
|
||||
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
|
||||
/* Block CPU access */
|
||||
WREG32(BIF_FB_EN, 0);
|
||||
WREG32(mmBIF_FB_EN, 0);
|
||||
/* blackout the MC */
|
||||
blackout = REG_SET_FIELD(blackout,
|
||||
mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
|
||||
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
|
||||
MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
|
||||
WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
|
||||
}
|
||||
/* wait for the MC to settle */
|
||||
udelay(100);
|
||||
|
@ -77,13 +95,13 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
|
|||
u32 tmp;
|
||||
|
||||
/* unblackout the MC */
|
||||
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
|
||||
tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
|
||||
WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
|
||||
tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
|
||||
tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
|
||||
WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
|
||||
/* allow CPU access */
|
||||
tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
|
||||
tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
|
||||
WREG32(BIF_FB_EN, tmp);
|
||||
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
|
||||
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
|
||||
WREG32(mmBIF_FB_EN, tmp);
|
||||
|
||||
if (adev->mode_info.num_crtc)
|
||||
amdgpu_display_resume_mc_access(adev, save);
|
||||
|
@ -158,37 +176,37 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
|
|||
new_fw_data = (const __le32 *)
|
||||
(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
|
||||
|
||||
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
|
||||
running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
|
||||
|
||||
if (running == 0) {
|
||||
|
||||
/* reset the engine and set to writable */
|
||||
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
|
||||
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
|
||||
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
|
||||
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
|
||||
|
||||
/* load mc io regs */
|
||||
for (i = 0; i < regs_size; i++) {
|
||||
WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
|
||||
WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
|
||||
WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
|
||||
WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
|
||||
}
|
||||
/* load the MC ucode */
|
||||
for (i = 0; i < ucode_size; i++) {
|
||||
WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
|
||||
WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
|
||||
}
|
||||
|
||||
/* put the engine back into the active state */
|
||||
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
|
||||
WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
|
||||
WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
|
||||
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
|
||||
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
|
||||
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
|
||||
|
||||
/* wait for training to complete */
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
|
||||
if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
|
||||
if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
|
@ -225,7 +243,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
|
|||
WREG32((0xb08 + j), 0x00000000);
|
||||
WREG32((0xb09 + j), 0x00000000);
|
||||
}
|
||||
WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
|
||||
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
|
||||
|
||||
gmc_v6_0_mc_stop(adev, &save);
|
||||
|
||||
|
@ -233,24 +251,24 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
|
|||
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
|
||||
}
|
||||
|
||||
WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
|
||||
WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);
|
||||
/* Update configuration */
|
||||
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
|
||||
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
|
||||
adev->mc.vram_start >> 12);
|
||||
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
|
||||
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
|
||||
adev->mc.vram_end >> 12);
|
||||
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
|
||||
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
|
||||
adev->vram_scratch.gpu_addr >> 12);
|
||||
tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
|
||||
tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
|
||||
WREG32(MC_VM_FB_LOCATION, tmp);
|
||||
WREG32(mmMC_VM_FB_LOCATION, tmp);
|
||||
/* XXX double check these! */
|
||||
WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
|
||||
WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
|
||||
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
|
||||
WREG32(MC_VM_AGP_BASE, 0);
|
||||
WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
|
||||
WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
|
||||
WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
|
||||
WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
|
||||
WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
|
||||
WREG32(mmMC_VM_AGP_BASE, 0);
|
||||
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
|
||||
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
|
||||
|
||||
if (gmc_v6_0_wait_for_idle((void *)adev)) {
|
||||
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
|
||||
|
@ -265,16 +283,16 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
|
|||
u32 tmp;
|
||||
int chansize, numchan;
|
||||
|
||||
tmp = RREG32(MC_ARB_RAMCFG);
|
||||
if (tmp & CHANSIZE_OVERRIDE) {
|
||||
tmp = RREG32(mmMC_ARB_RAMCFG);
|
||||
if (tmp & (1 << 11)) {
|
||||
chansize = 16;
|
||||
} else if (tmp & CHANSIZE_MASK) {
|
||||
} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
|
||||
chansize = 64;
|
||||
} else {
|
||||
chansize = 32;
|
||||
}
|
||||
tmp = RREG32(MC_SHARED_CHMAP);
|
||||
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
|
||||
tmp = RREG32(mmMC_SHARED_CHMAP);
|
||||
switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
|
||||
case 0:
|
||||
default:
|
||||
numchan = 1;
|
||||
|
@ -309,8 +327,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
|
|||
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
|
||||
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
|
||||
/* size in MB on si */
|
||||
adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||
adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||
adev->mc.visible_vram_size = adev->mc.aper_size;
|
||||
|
||||
/* unless the user had overridden it, set the gart
|
||||
|
@ -329,9 +347,9 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
|
|||
static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
|
||||
uint32_t vmid)
|
||||
{
|
||||
WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
|
||||
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
|
||||
|
||||
WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
|
||||
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
|
||||
}
|
||||
|
||||
static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
|
||||
|
@ -355,20 +373,20 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
|
|||
{
|
||||
u32 tmp;
|
||||
|
||||
tmp = RREG32(VM_CONTEXT1_CNTL);
|
||||
tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
|
||||
xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
|
||||
xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
|
||||
xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
|
||||
xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
|
||||
xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
|
||||
xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
WREG32(VM_CONTEXT1_CNTL, tmp);
|
||||
tmp = RREG32(mmVM_CONTEXT1_CNTL);
|
||||
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
|
||||
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
|
||||
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
|
||||
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
|
||||
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
|
||||
READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
|
||||
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
|
||||
WREG32(mmVM_CONTEXT1_CNTL, tmp);
|
||||
}
|
||||
|
||||
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
|
||||
|
@ -383,33 +401,39 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
|
|||
if (r)
|
||||
return r;
|
||||
/* Setup TLB control */
|
||||
WREG32(MC_VM_MX_L1_TLB_CNTL,
|
||||
WREG32(mmMC_VM_MX_L1_TLB_CNTL,
|
||||
(0xA << 7) |
|
||||
ENABLE_L1_TLB |
|
||||
ENABLE_L1_FRAGMENT_PROCESSING |
|
||||
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
|
||||
ENABLE_ADVANCED_DRIVER_MODEL |
|
||||
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
|
||||
MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
|
||||
MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
|
||||
MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
|
||||
MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
|
||||
(0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
|
||||
/* Setup L2 cache */
|
||||
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
|
||||
ENABLE_L2_FRAGMENT_PROCESSING |
|
||||
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
|
||||
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
|
||||
EFFECTIVE_L2_QUEUE_SIZE(7) |
|
||||
CONTEXT1_IDENTITY_ACCESS_MODE(1));
|
||||
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
|
||||
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
|
||||
BANK_SELECT(4) |
|
||||
L2_CACHE_BIGK_FRAGMENT_SIZE(4));
|
||||
WREG32(mmVM_L2_CNTL,
|
||||
VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
|
||||
VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
|
||||
VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
|
||||
VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
|
||||
(7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
|
||||
(1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
|
||||
WREG32(mmVM_L2_CNTL2,
|
||||
VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
|
||||
VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
|
||||
WREG32(mmVM_L2_CNTL3,
|
||||
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
|
||||
(4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
|
||||
(4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
|
||||
/* setup context0 */
|
||||
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
|
||||
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
|
||||
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
|
||||
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
|
||||
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
|
||||
WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
|
||||
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
|
||||
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
|
||||
(u32)(adev->dummy_page.addr >> 12));
|
||||
WREG32(VM_CONTEXT0_CNTL2, 0);
|
||||
WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
|
||||
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
|
||||
WREG32(mmVM_CONTEXT0_CNTL2, 0);
|
||||
WREG32(mmVM_CONTEXT0_CNTL,
|
||||
VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
|
||||
(0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
|
||||
VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
|
||||
|
||||
WREG32(0x575, 0);
|
||||
WREG32(0x576, 0);
|
||||
|
@ -417,39 +441,41 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
|
|||
|
||||
/* empty context1-15 */
|
||||
/* set vm size, must be a multiple of 4 */
|
||||
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
|
||||
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
|
||||
WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
|
||||
WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
|
||||
/* Assign the pt base to something valid for now; the pts used for
|
||||
* the VMs are determined by the application and setup and assigned
|
||||
* on the fly in the vm part of radeon_gart.c
|
||||
*/
|
||||
for (i = 1; i < 16; i++) {
|
||||
if (i < 8)
|
||||
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
|
||||
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
|
||||
adev->gart.table_addr >> 12);
|
||||
else
|
||||
WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
|
||||
WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
|
||||
adev->gart.table_addr >> 12);
|
||||
}
|
||||
|
||||
/* enable context1-15 */
|
||||
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
|
||||
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
|
||||
(u32)(adev->dummy_page.addr >> 12));
|
||||
WREG32(VM_CONTEXT1_CNTL2, 4);
|
||||
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
|
||||
PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
|
||||
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
|
||||
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
|
||||
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
|
||||
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
|
||||
PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
|
||||
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
|
||||
VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
|
||||
VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
|
||||
READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
|
||||
READ_PROTECTION_FAULT_ENABLE_DEFAULT |
|
||||
WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
|
||||
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
|
||||
WREG32(mmVM_CONTEXT1_CNTL2, 4);
|
||||
WREG32(mmVM_CONTEXT1_CNTL,
|
||||
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
|
||||
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
|
||||
((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
|
||||
VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
|
||||
VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
|
||||
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
|
||||
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
|
||||
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
|
||||
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
|
||||
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
|
||||
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
|
||||
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
|
||||
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
|
||||
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
|
||||
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
|
||||
|
||||
gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
|
||||
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
|
||||
|
@ -488,19 +514,22 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
|
|||
}*/
|
||||
|
||||
/* Disable all tables */
|
||||
WREG32(VM_CONTEXT0_CNTL, 0);
|
||||
WREG32(VM_CONTEXT1_CNTL, 0);
|
||||
WREG32(mmVM_CONTEXT0_CNTL, 0);
|
||||
WREG32(mmVM_CONTEXT1_CNTL, 0);
|
||||
/* Setup TLB control */
|
||||
WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
|
||||
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
|
||||
WREG32(mmMC_VM_MX_L1_TLB_CNTL,
|
||||
MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
|
||||
(0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
|
||||
/* Setup L2 cache */
|
||||
WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
|
||||
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
|
||||
EFFECTIVE_L2_QUEUE_SIZE(7) |
|
||||
CONTEXT1_IDENTITY_ACCESS_MODE(1));
|
||||
WREG32(VM_L2_CNTL2, 0);
|
||||
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
|
||||
L2_CACHE_BIGK_FRAGMENT_SIZE(0));
|
||||
WREG32(mmVM_L2_CNTL,
|
||||
VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
|
||||
VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
|
||||
(7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
|
||||
(1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
|
||||
WREG32(mmVM_L2_CNTL2, 0);
|
||||
WREG32(mmVM_L2_CNTL3,
|
||||
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
|
||||
(0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
|
||||
amdgpu_gart_table_vram_unpin(adev);
|
||||
}
|
||||
|
||||
|
@ -523,7 +552,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
|
|||
|
||||
/* base offset of vram pages */
|
||||
if (adev->flags & AMD_IS_APU) {
|
||||
u64 tmp = RREG32(MC_VM_FB_OFFSET);
|
||||
u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
|
||||
tmp <<= 22;
|
||||
adev->vm_manager.vram_base_offset = tmp;
|
||||
} else
|
||||
|
@ -540,19 +569,19 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
|
|||
u32 status, u32 addr, u32 mc_client)
|
||||
{
|
||||
u32 mc_id;
|
||||
u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
|
||||
u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
xxPROTECTIONS);
|
||||
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
|
||||
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
PROTECTIONS);
|
||||
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
|
||||
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
|
||||
|
||||
mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
xxMEMORY_CLIENT_ID);
|
||||
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
MEMORY_CLIENT_ID);
|
||||
|
||||
dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
|
||||
protections, vmid, addr,
|
||||
REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
xxMEMORY_CLIENT_RW) ?
|
||||
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
|
||||
MEMORY_CLIENT_RW) ?
|
||||
"write" : "read", block, mc_client, mc_id);
|
||||
}
|
||||
|
||||
|
@ -655,7 +684,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
|
|||
{
|
||||
u32 orig, data;
|
||||
|
||||
orig = data = RREG32(HDP_HOST_PATH_CNTL);
|
||||
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
|
||||
|
||||
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
|
||||
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
|
||||
|
@ -663,7 +692,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
|
|||
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
|
||||
|
||||
if (orig != data)
|
||||
WREG32(HDP_HOST_PATH_CNTL, data);
|
||||
WREG32(mmHDP_HOST_PATH_CNTL, data);
|
||||
}
|
||||
|
||||
static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
|
||||
|
@ -671,7 +700,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
|
|||
{
|
||||
u32 orig, data;
|
||||
|
||||
orig = data = RREG32(HDP_MEM_POWER_LS);
|
||||
orig = data = RREG32(mmHDP_MEM_POWER_LS);
|
||||
|
||||
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
|
||||
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
|
||||
|
@ -679,7 +708,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
|
|||
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
|
||||
|
||||
if (orig != data)
|
||||
WREG32(HDP_MEM_POWER_LS, data);
|
||||
WREG32(mmHDP_MEM_POWER_LS, data);
|
||||
}
|
||||
*/
|
||||
|
||||
|
@ -713,7 +742,7 @@ static int gmc_v6_0_early_init(void *handle)
|
|||
if (adev->flags & AMD_IS_APU) {
|
||||
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
|
||||
} else {
|
||||
u32 tmp = RREG32(MC_SEQ_MISC0);
|
||||
u32 tmp = RREG32(mmMC_SEQ_MISC0);
|
||||
tmp &= MC_SEQ_MISC0__MT__MASK;
|
||||
adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
|
||||
}
|
||||
|
@ -879,7 +908,7 @@ static int gmc_v6_0_resume(void *handle)
|
|||
static bool gmc_v6_0_is_idle(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
u32 tmp = RREG32(SRBM_STATUS);
|
||||
u32 tmp = RREG32(mmSRBM_STATUS);
|
||||
|
||||
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
|
||||
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
|
||||
|
@ -895,7 +924,7 @@ static int gmc_v6_0_wait_for_idle(void *handle)
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
|
||||
tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
|
||||
SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
|
||||
SRBM_STATUS__MCC_BUSY_MASK |
|
||||
SRBM_STATUS__MCD_BUSY_MASK |
|
||||
|
@ -913,17 +942,17 @@ static int gmc_v6_0_soft_reset(void *handle)
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_mode_mc_save save;
|
||||
u32 srbm_soft_reset = 0;
|
||||
u32 tmp = RREG32(SRBM_STATUS);
|
||||
u32 tmp = RREG32(mmSRBM_STATUS);
|
||||
|
||||
if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
|
||||
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
|
||||
mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
|
||||
SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
|
||||
|
||||
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
|
||||
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
|
||||
if (!(adev->flags & AMD_IS_APU))
|
||||
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
|
||||
mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
|
||||
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
|
||||
}
|
||||
|
||||
if (srbm_soft_reset) {
|
||||
|
@ -933,17 +962,17 @@ static int gmc_v6_0_soft_reset(void *handle)
|
|||
}
|
||||
|
||||
|
||||
tmp = RREG32(SRBM_SOFT_RESET);
|
||||
tmp = RREG32(mmSRBM_SOFT_RESET);
|
||||
tmp |= srbm_soft_reset;
|
||||
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
|
||||
WREG32(SRBM_SOFT_RESET, tmp);
|
||||
tmp = RREG32(SRBM_SOFT_RESET);
|
||||
WREG32(mmSRBM_SOFT_RESET, tmp);
|
||||
tmp = RREG32(mmSRBM_SOFT_RESET);
|
||||
|
||||
udelay(50);
|
||||
|
||||
tmp &= ~srbm_soft_reset;
|
||||
WREG32(SRBM_SOFT_RESET, tmp);
|
||||
tmp = RREG32(SRBM_SOFT_RESET);
|
||||
WREG32(mmSRBM_SOFT_RESET, tmp);
|
||||
tmp = RREG32(mmSRBM_SOFT_RESET);
|
||||
|
||||
udelay(50);
|
||||
|
||||
|
@ -969,20 +998,20 @@ static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
|
|||
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
tmp = RREG32(VM_CONTEXT0_CNTL);
|
||||
tmp = RREG32(mmVM_CONTEXT0_CNTL);
|
||||
tmp &= ~bits;
|
||||
WREG32(VM_CONTEXT0_CNTL, tmp);
|
||||
tmp = RREG32(VM_CONTEXT1_CNTL);
|
||||
WREG32(mmVM_CONTEXT0_CNTL, tmp);
|
||||
tmp = RREG32(mmVM_CONTEXT1_CNTL);
|
||||
tmp &= ~bits;
|
||||
WREG32(VM_CONTEXT1_CNTL, tmp);
|
||||
WREG32(mmVM_CONTEXT1_CNTL, tmp);
|
||||
break;
|
||||
case AMDGPU_IRQ_STATE_ENABLE:
|
||||
tmp = RREG32(VM_CONTEXT0_CNTL);
|
||||
tmp = RREG32(mmVM_CONTEXT0_CNTL);
|
||||
tmp |= bits;
|
||||
WREG32(VM_CONTEXT0_CNTL, tmp);
|
||||
tmp = RREG32(VM_CONTEXT1_CNTL);
|
||||
WREG32(mmVM_CONTEXT0_CNTL, tmp);
|
||||
tmp = RREG32(mmVM_CONTEXT1_CNTL);
|
||||
tmp |= bits;
|
||||
WREG32(VM_CONTEXT1_CNTL, tmp);
|
||||
WREG32(mmVM_CONTEXT1_CNTL, tmp);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@@ -997,9 +1026,9 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status;

addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

if (!addr && !status)
return 0;

@@ -1007,13 +1036,15 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v6_0_set_fault_enable_default(adev, false);

dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
if (printk_ratelimit()) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
}

return 0;
}
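The hunk above (and the matching gmc_v7_0 and gmc_v8_0 hunks further down) wraps the VM-fault dev_err output in printk_ratelimit(), so a misbehaving process that faults continuously cannot flood the kernel log. As a rough illustration of the idea only, not the kernel's actual ___ratelimit() implementation, here is a time-window gate in plain C:

/* Sketch of a printk_ratelimit()-style gate: allow a burst of messages per
 * time window and drop the rest. Interval and burst values are arbitrary. */
#include <stdio.h>
#include <time.h>

static int ratelimit(void)
{
	static time_t window_start;
	static int printed;
	const int interval = 5;   /* seconds per window */
	const int burst = 10;     /* messages allowed per window */
	time_t now = time(NULL);

	if (now - window_start >= interval) {
		window_start = now;
		printed = 0;
	}
	if (printed >= burst)
		return 0;	/* suppress */
	printed++;
	return 1;		/* allow */
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		if (ratelimit())
			fprintf(stderr, "GPU fault detected: %d\n", i);
	return 0;
}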
@@ -711,7 +711,7 @@ static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);

printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
@@ -1198,13 +1198,15 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v7_0_set_fault_enable_default(adev, false);

dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
if (printk_ratelimit()) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
}

return 0;
}
@@ -837,7 +837,7 @@ static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);

printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
@@ -1242,13 +1242,15 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v8_0_set_fault_enable_default(adev, false);

dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
if (printk_ratelimit()) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
}

return 0;
}
@@ -2845,7 +2845,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}

if (amdgpu_sclk_deep_sleep_en)
if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
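The kv_dpm hunk above now keys sclk deep sleep off a bit in the shared amdgpu_pp_feature_mask module parameter instead of the dedicated amdgpu_sclk_deep_sleep_en flag this series drops. A small sketch of that parameter-consolidation pattern follows; the bit positions and default value are illustrative, not the real amd_pp_feature layout.

/* Sketch: one feature bitmask replacing per-feature boolean parameters.
 * Bit assignments below are placeholders for illustration only. */
#include <stdbool.h>
#include <stdio.h>

enum pp_feature_mask {
	SCLK_DEEP_SLEEP_MASK   = (1 << 0),
	POWER_CONTAINMENT_MASK = (1 << 1),
};

static unsigned int amdgpu_pp_feature_mask = 0xffffffff;	/* default: all features on */

int main(void)
{
	bool caps_sclk_ds;

	if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
		caps_sclk_ds = true;
	else
		caps_sclk_ds = false;

	printf("sclk deep sleep %s\n", caps_sclk_ds ? "enabled" : "disabled");
	return 0;
}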
@@ -775,11 +775,11 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
unsigned ndw = count * 2;

ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
@@ -977,11 +977,11 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
unsigned ndw = count * 2;

ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
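The two SDMA hunks above select the WRITE_LINEAR sub-opcode for the PTE write packet and drop the unused --count / pe += 8 loop iterators: a single write-linear packet streams ndw consecutive dwords starting at pe, so only the value needs advancing. A userspace sketch of building such a packet into an IB buffer follows; the opcode values, header layout and size-field semantics are simplified stand-ins, not the real SDMA packet encoding.

/* Sketch: fill an indirect buffer with one "write linear" style packet that
 * writes `count` 64-bit PTEs starting at GPU address `pe`. Header layout and
 * opcode values are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define SDMA_OP_WRITE           2
#define SDMA_SUBOP_WRITE_LINEAR 0
#define PKT_HEADER(op, sub_op)  (((op) & 0xff) | (((sub_op) & 0xff) << 8))

static void write_pte(uint32_t *ib, unsigned *length_dw,
		      uint64_t pe, uint64_t value, unsigned count, uint32_t incr)
{
	unsigned ndw = count * 2;	/* two dwords per 64-bit PTE */

	ib[(*length_dw)++] = PKT_HEADER(SDMA_OP_WRITE, SDMA_SUBOP_WRITE_LINEAR);
	ib[(*length_dw)++] = (uint32_t)pe;		/* destination address, low */
	ib[(*length_dw)++] = (uint32_t)(pe >> 32);	/* destination address, high */
	ib[(*length_dw)++] = ndw;			/* payload size in dwords */
	for (; ndw > 0; ndw -= 2) {			/* pe and count stay untouched */
		ib[(*length_dw)++] = (uint32_t)value;
		ib[(*length_dw)++] = (uint32_t)(value >> 32);
		value += incr;
	}
}

int main(void)
{
	uint32_t ib[64];
	unsigned len = 0;

	write_pte(ib, &len, 0x100000, 0xdead0000u, 4, 0x1000);
	for (unsigned i = 0; i < len; i++)
		printf("ib[%u] = 0x%08x\n", i, (unsigned)ib[i]);
	return 0;
}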
@ -0,0 +1,272 @@
|
|||
/*
|
||||
* Copyright 2016 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#ifndef SI_ENUMS_H
|
||||
#define SI_ENUMS_H
|
||||
|
||||
#define VBLANK_INT_MASK (1 << 0)
|
||||
#define DC_HPDx_INT_EN (1 << 16)
|
||||
#define VBLANK_ACK (1 << 4)
|
||||
#define VLINE_ACK (1 << 4)
|
||||
|
||||
#define CURSOR_WIDTH 64
|
||||
#define CURSOR_HEIGHT 64
|
||||
|
||||
#define VGA_VSTATUS_CNTL 0xFFFCFFFF
|
||||
#define PRIORITY_MARK_MASK 0x7fff
|
||||
#define PRIORITY_OFF (1 << 16)
|
||||
#define PRIORITY_ALWAYS_ON (1 << 20)
|
||||
#define INTERLEAVE_EN (1 << 0)
|
||||
|
||||
#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
|
||||
#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
|
||||
#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
|
||||
|
||||
#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
|
||||
#define GRPH_ENDIAN_NONE 0
|
||||
#define GRPH_ENDIAN_8IN16 1
|
||||
#define GRPH_ENDIAN_8IN32 2
|
||||
#define GRPH_ENDIAN_8IN64 3
|
||||
|
||||
#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
|
||||
#define GRPH_DEPTH_8BPP 0
|
||||
#define GRPH_DEPTH_16BPP 1
|
||||
#define GRPH_DEPTH_32BPP 2
|
||||
|
||||
#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
|
||||
#define GRPH_FORMAT_INDEXED 0
|
||||
#define GRPH_FORMAT_ARGB1555 0
|
||||
#define GRPH_FORMAT_ARGB565 1
|
||||
#define GRPH_FORMAT_ARGB4444 2
|
||||
#define GRPH_FORMAT_AI88 3
|
||||
#define GRPH_FORMAT_MONO16 4
|
||||
#define GRPH_FORMAT_BGRA5551 5
|
||||
#define GRPH_FORMAT_ARGB8888 0
|
||||
#define GRPH_FORMAT_ARGB2101010 1
|
||||
#define GRPH_FORMAT_32BPP_DIG 2
|
||||
#define GRPH_FORMAT_8B_ARGB2101010 3
|
||||
#define GRPH_FORMAT_BGRA1010102 4
|
||||
#define GRPH_FORMAT_8B_BGRA1010102 5
|
||||
#define GRPH_FORMAT_RGB111110 6
|
||||
#define GRPH_FORMAT_BGR101111 7
|
||||
|
||||
#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
|
||||
#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
|
||||
#define GRPH_ARRAY_LINEAR_GENERAL 0
|
||||
#define GRPH_ARRAY_LINEAR_ALIGNED 1
|
||||
#define GRPH_ARRAY_1D_TILED_THIN1 2
|
||||
#define GRPH_ARRAY_2D_TILED_THIN1 4
|
||||
#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
|
||||
#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
|
||||
#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
|
||||
#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
|
||||
#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
|
||||
#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
|
||||
|
||||
#define CURSOR_EN (1 << 0)
|
||||
#define CURSOR_MODE(x) (((x) & 0x3) << 8)
|
||||
#define CURSOR_MONO 0
|
||||
#define CURSOR_24_1 1
|
||||
#define CURSOR_24_8_PRE_MULT 2
|
||||
#define CURSOR_24_8_UNPRE_MULT 3
|
||||
#define CURSOR_2X_MAGNIFY (1 << 16)
|
||||
#define CURSOR_FORCE_MC_ON (1 << 20)
|
||||
#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
|
||||
#define CURSOR_URGENT_ALWAYS 0
|
||||
#define CURSOR_URGENT_1_8 1
|
||||
#define CURSOR_URGENT_1_4 2
|
||||
#define CURSOR_URGENT_3_8 3
|
||||
#define CURSOR_URGENT_1_2 4
|
||||
#define CURSOR_UPDATE_PENDING (1 << 0)
|
||||
#define CURSOR_UPDATE_TAKEN (1 << 1)
|
||||
#define CURSOR_UPDATE_LOCK (1 << 16)
|
||||
#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
|
||||
|
||||
#define AMDGPU_NUM_OF_VMIDS 8
|
||||
#define SI_CRTC0_REGISTER_OFFSET 0
|
||||
#define SI_CRTC1_REGISTER_OFFSET 0x300
|
||||
#define SI_CRTC2_REGISTER_OFFSET 0x2600
|
||||
#define SI_CRTC3_REGISTER_OFFSET 0x2900
|
||||
#define SI_CRTC4_REGISTER_OFFSET 0x2c00
|
||||
#define SI_CRTC5_REGISTER_OFFSET 0x2f00
|
||||
|
||||
#define DMA0_REGISTER_OFFSET 0x000
|
||||
#define DMA1_REGISTER_OFFSET 0x200
|
||||
#define ES_AND_GS_AUTO 3
|
||||
#define RADEON_PACKET_TYPE3 3
|
||||
#define CE_PARTITION_BASE 3
|
||||
#define BUF_SWAP_32BIT (2 << 16)
|
||||
|
||||
#define GFX_POWER_STATUS (1 << 1)
|
||||
#define GFX_CLOCK_STATUS (1 << 2)
|
||||
#define GFX_LS_STATUS (1 << 3)
|
||||
#define RLC_BUSY_STATUS (1 << 0)
|
||||
|
||||
#define RLC_PUD(x) ((x) << 0)
|
||||
#define RLC_PUD_MASK (0xff << 0)
|
||||
#define RLC_PDD(x) ((x) << 8)
|
||||
#define RLC_PDD_MASK (0xff << 8)
|
||||
#define RLC_TTPD(x) ((x) << 16)
|
||||
#define RLC_TTPD_MASK (0xff << 16)
|
||||
#define RLC_MSD(x) ((x) << 24)
|
||||
#define RLC_MSD_MASK (0xff << 24)
|
||||
#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
|
||||
#define WRITE_DATA_DST_SEL(x) ((x) << 8)
|
||||
#define EVENT_TYPE(x) ((x) << 0)
|
||||
#define EVENT_INDEX(x) ((x) << 8)
|
||||
#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
|
||||
#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
|
||||
#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
|
||||
|
||||
#define GFX6_NUM_GFX_RINGS 1
|
||||
#define GFX6_NUM_COMPUTE_RINGS 2
|
||||
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
|
||||
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
|
||||
|
||||
#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
|
||||
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
|
||||
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
|
||||
|
||||
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
|
||||
(((op) & 0xFF) << 8) | \
|
||||
((n) & 0x3FFF) << 16)
|
||||
#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
|
||||
#define PACKET3_NOP 0x10
|
||||
#define PACKET3_SET_BASE 0x11
|
||||
#define PACKET3_BASE_INDEX(x) ((x) << 0)
|
||||
#define PACKET3_CLEAR_STATE 0x12
|
||||
#define PACKET3_INDEX_BUFFER_SIZE 0x13
|
||||
#define PACKET3_DISPATCH_DIRECT 0x15
|
||||
#define PACKET3_DISPATCH_INDIRECT 0x16
|
||||
#define PACKET3_ALLOC_GDS 0x1B
|
||||
#define PACKET3_WRITE_GDS_RAM 0x1C
|
||||
#define PACKET3_ATOMIC_GDS 0x1D
|
||||
#define PACKET3_ATOMIC 0x1E
|
||||
#define PACKET3_OCCLUSION_QUERY 0x1F
|
||||
#define PACKET3_SET_PREDICATION 0x20
|
||||
#define PACKET3_REG_RMW 0x21
|
||||
#define PACKET3_COND_EXEC 0x22
|
||||
#define PACKET3_PRED_EXEC 0x23
|
||||
#define PACKET3_DRAW_INDIRECT 0x24
|
||||
#define PACKET3_DRAW_INDEX_INDIRECT 0x25
|
||||
#define PACKET3_INDEX_BASE 0x26
|
||||
#define PACKET3_DRAW_INDEX_2 0x27
|
||||
#define PACKET3_CONTEXT_CONTROL 0x28
|
||||
#define PACKET3_INDEX_TYPE 0x2A
|
||||
#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
|
||||
#define PACKET3_DRAW_INDEX_AUTO 0x2D
|
||||
#define PACKET3_DRAW_INDEX_IMMD 0x2E
|
||||
#define PACKET3_NUM_INSTANCES 0x2F
|
||||
#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
|
||||
#define PACKET3_INDIRECT_BUFFER_CONST 0x31
|
||||
#define PACKET3_INDIRECT_BUFFER 0x3F
|
||||
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
|
||||
#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
|
||||
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
|
||||
#define PACKET3_WRITE_DATA 0x37
|
||||
#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
|
||||
#define PACKET3_MEM_SEMAPHORE 0x39
|
||||
#define PACKET3_MPEG_INDEX 0x3A
|
||||
#define PACKET3_COPY_DW 0x3B
|
||||
#define PACKET3_WAIT_REG_MEM 0x3C
|
||||
#define PACKET3_MEM_WRITE 0x3D
|
||||
#define PACKET3_COPY_DATA 0x40
|
||||
#define PACKET3_CP_DMA 0x41
|
||||
# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
|
||||
# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
|
||||
# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
|
||||
# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
|
||||
# define PACKET3_CP_DMA_DIS_WC (1 << 21)
|
||||
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
|
||||
# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
|
||||
# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
|
||||
# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
|
||||
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
|
||||
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
|
||||
# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
|
||||
#define PACKET3_PFP_SYNC_ME 0x42
|
||||
#define PACKET3_SURFACE_SYNC 0x43
|
||||
# define PACKET3_DEST_BASE_0_ENA (1 << 0)
|
||||
# define PACKET3_DEST_BASE_1_ENA (1 << 1)
|
||||
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
|
||||
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
|
||||
# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
|
||||
# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
|
||||
# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
|
||||
# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
|
||||
# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
|
||||
# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
|
||||
# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
|
||||
# define PACKET3_DEST_BASE_2_ENA (1 << 19)
|
||||
# define PACKET3_DEST_BASE_3_ENA (1 << 21)
|
||||
# define PACKET3_TCL1_ACTION_ENA (1 << 22)
|
||||
# define PACKET3_TC_ACTION_ENA (1 << 23)
|
||||
# define PACKET3_CB_ACTION_ENA (1 << 25)
|
||||
# define PACKET3_DB_ACTION_ENA (1 << 26)
|
||||
# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
|
||||
# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
|
||||
#define PACKET3_ME_INITIALIZE 0x44
|
||||
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
|
||||
#define PACKET3_COND_WRITE 0x45
|
||||
#define PACKET3_EVENT_WRITE 0x46
|
||||
#define PACKET3_EVENT_WRITE_EOP 0x47
|
||||
#define PACKET3_EVENT_WRITE_EOS 0x48
|
||||
#define PACKET3_PREAMBLE_CNTL 0x4A
|
||||
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
|
||||
# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
|
||||
#define PACKET3_ONE_REG_WRITE 0x57
|
||||
#define PACKET3_LOAD_CONFIG_REG 0x5F
|
||||
#define PACKET3_LOAD_CONTEXT_REG 0x60
|
||||
#define PACKET3_LOAD_SH_REG 0x61
|
||||
#define PACKET3_SET_CONFIG_REG 0x68
|
||||
#define PACKET3_SET_CONFIG_REG_START 0x00002000
|
||||
#define PACKET3_SET_CONFIG_REG_END 0x00002c00
|
||||
#define PACKET3_SET_CONTEXT_REG 0x69
|
||||
#define PACKET3_SET_CONTEXT_REG_START 0x000a000
|
||||
#define PACKET3_SET_CONTEXT_REG_END 0x000a400
|
||||
#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
|
||||
#define PACKET3_SET_RESOURCE_INDIRECT 0x74
|
||||
#define PACKET3_SET_SH_REG 0x76
|
||||
#define PACKET3_SET_SH_REG_START 0x00002c00
|
||||
#define PACKET3_SET_SH_REG_END 0x00003000
|
||||
#define PACKET3_SET_SH_REG_OFFSET 0x77
|
||||
#define PACKET3_ME_WRITE 0x7A
|
||||
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
|
||||
#define PACKET3_SCRATCH_RAM_READ 0x7E
|
||||
#define PACKET3_CE_WRITE 0x7F
|
||||
#define PACKET3_LOAD_CONST_RAM 0x80
|
||||
#define PACKET3_WRITE_CONST_RAM 0x81
|
||||
#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
|
||||
#define PACKET3_DUMP_CONST_RAM 0x83
|
||||
#define PACKET3_INCREMENT_CE_COUNTER 0x84
|
||||
#define PACKET3_INCREMENT_DE_COUNTER 0x85
|
||||
#define PACKET3_WAIT_ON_CE_COUNTER 0x86
|
||||
#define PACKET3_WAIT_ON_DE_COUNTER 0x87
|
||||
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
|
||||
#define PACKET3_SET_CE_DE_COUNTERS 0x89
|
||||
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
|
||||
#define PACKET3_SWITCH_BUFFER 0x8B
|
||||
#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
|
||||
#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
|
||||
#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
|
||||
|
||||
#endif
|
|
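The new si_enums.h above centralizes the SI PM4 type-3 packet macros: PACKET3(op, n) packs the packet type into bits 30-31, the opcode into bits 8-15 and the count into bits 16-29. A short sketch of using it to encode one packet follows; the PACKET3 macro, PACKET3_WRITE_DATA and WRITE_DATA_DST_SEL come from the header above, while the packet body layout, destination-select value and count convention shown here are simplified for illustration.

/* Sketch: encoding a PM4 type-3 packet header with the PACKET3 macro from
 * si_enums.h. The WRITE_DATA body below is a simplified illustration, not the
 * exact SI field layout. */
#include <stdint.h>
#include <stdio.h>

#define RADEON_PACKET_TYPE3 3
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
			(((op) & 0xFF) << 8) | \
			((n) & 0x3FFF) << 16)
#define PACKET3_WRITE_DATA 0x37
#define WRITE_DATA_DST_SEL(x) ((x) << 8)

int main(void)
{
	uint32_t ib[8];
	unsigned n = 0;

	ib[n++] = PACKET3(PACKET3_WRITE_DATA, 3);	/* header: opcode + count field */
	ib[n++] = WRITE_DATA_DST_SEL(5);		/* destination select (illustrative) */
	ib[n++] = 0x00001000;				/* destination address, low */
	ib[n++] = 0x00000000;				/* destination address, high */
	ib[n++] = 0xcafebabe;				/* payload dword */

	for (unsigned i = 0; i < n; i++)
		printf("0x%08x\n", (unsigned)ib[i]);
	return 0;
}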
@ -45,7 +45,8 @@ static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
|
|||
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
|
||||
static int uvd_v4_2_start(struct amdgpu_device *adev);
|
||||
static void uvd_v4_2_stop(struct amdgpu_device *adev);
|
||||
|
||||
static int uvd_v4_2_set_clockgating_state(void *handle,
|
||||
enum amd_clockgating_state state);
|
||||
/**
|
||||
* uvd_v4_2_ring_get_rptr - get read pointer
|
||||
*
|
||||
|
@ -154,9 +155,9 @@ static int uvd_v4_2_hw_init(void *handle)
|
|||
uint32_t tmp;
|
||||
int r;
|
||||
|
||||
/* raise clocks while booting up the VCPU */
|
||||
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
|
||||
|
||||
uvd_v4_2_init_cg(adev);
|
||||
uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
r = uvd_v4_2_start(adev);
|
||||
if (r)
|
||||
goto done;
|
||||
|
@ -196,8 +197,6 @@ static int uvd_v4_2_hw_init(void *handle)
|
|||
amdgpu_ring_commit(ring);
|
||||
|
||||
done:
|
||||
/* lower clocks again */
|
||||
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
|
||||
|
||||
if (!r)
|
||||
DRM_INFO("UVD initialized successfully.\n");
|
||||
|
@ -274,9 +273,6 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
|
|||
|
||||
uvd_v4_2_mc_resume(adev);
|
||||
|
||||
/* disable clock gating */
|
||||
WREG32(mmUVD_CGC_GATE, 0);
|
||||
|
||||
/* disable interupt */
|
||||
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
|
||||
|
||||
|
@ -568,8 +564,6 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
|
|||
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
|
||||
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
|
||||
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
|
||||
|
||||
uvd_v4_2_init_cg(adev);
|
||||
}
|
||||
|
||||
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
|
||||
|
@@ -579,7 +573,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,

if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data = 0xfff;
data |= 0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

orig = data = RREG32(mmUVD_CGC_CTRL);
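The one-character change above (data = 0xfff becomes data |= 0xfff) turns a blind overwrite of UVD_CGC_MEM_CTRL into a read-modify-write, so bits outside the low 12 are preserved. A tiny illustration of why that matters; the register image and bit meanings here are invented for the example.

/* Sketch: blind write vs. read-modify-write of a control register.
 * The register value and bit layout are made up for illustration. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xa5a50000;	/* pretend the upper bits hold other settings */

int main(void)
{
	uint32_t data;

	data = fake_reg;
	data = 0xfff;			/* old code: clobbers the upper 20 bits */
	printf("overwrite:         0x%08x\n", (unsigned)data);

	data = fake_reg;
	data |= 0xfff;			/* fixed code: only sets the low 12 bits */
	printf("read-modify-write: 0x%08x\n", (unsigned)data);
	return 0;
}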
@ -603,6 +597,8 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
|
|||
{
|
||||
u32 tmp, tmp2;
|
||||
|
||||
WREG32_FIELD(UVD_CGC_GATE, REGS, 0);
|
||||
|
||||
tmp = RREG32(mmUVD_CGC_CTRL);
|
||||
tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
|
||||
tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
|
||||
|
@ -686,34 +682,18 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
|
||||
{
|
||||
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
|
||||
|
||||
if (enable)
|
||||
tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
|
||||
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
|
||||
else
|
||||
tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
|
||||
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
|
||||
|
||||
WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
|
||||
}
|
||||
|
||||
static int uvd_v4_2_set_clockgating_state(void *handle,
|
||||
enum amd_clockgating_state state)
|
||||
{
|
||||
bool gate = false;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
if (state == AMD_CG_STATE_GATE)
|
||||
gate = true;
|
||||
|
||||
uvd_v5_0_set_bypass_mode(adev, gate);
|
||||
|
||||
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
|
||||
return 0;
|
||||
|
||||
if (state == AMD_CG_STATE_GATE)
|
||||
gate = true;
|
||||
|
||||
uvd_v4_2_enable_mgcg(adev, gate);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -40,7 +40,10 @@ static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
|
|||
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
|
||||
static int uvd_v5_0_start(struct amdgpu_device *adev);
|
||||
static void uvd_v5_0_stop(struct amdgpu_device *adev);
|
||||
|
||||
static int uvd_v5_0_set_clockgating_state(void *handle,
|
||||
enum amd_clockgating_state state);
|
||||
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
|
||||
bool enable);
|
||||
/**
|
||||
* uvd_v5_0_ring_get_rptr - get read pointer
|
||||
*
|
||||
|
@ -149,9 +152,6 @@ static int uvd_v5_0_hw_init(void *handle)
|
|||
uint32_t tmp;
|
||||
int r;
|
||||
|
||||
/* raise clocks while booting up the VCPU */
|
||||
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
|
||||
|
||||
r = uvd_v5_0_start(adev);
|
||||
if (r)
|
||||
goto done;
|
||||
|
@ -189,11 +189,7 @@ static int uvd_v5_0_hw_init(void *handle)
|
|||
amdgpu_ring_write(ring, 3);
|
||||
|
||||
amdgpu_ring_commit(ring);
|
||||
|
||||
done:
|
||||
/* lower clocks again */
|
||||
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
|
||||
|
||||
if (!r)
|
||||
DRM_INFO("UVD initialized successfully.\n");
|
||||
|
||||
|
@ -226,6 +222,7 @@ static int uvd_v5_0_suspend(void *handle)
|
|||
r = uvd_v5_0_hw_fini(adev);
|
||||
if (r)
|
||||
return r;
|
||||
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
|
||||
|
||||
r = amdgpu_uvd_suspend(adev);
|
||||
if (r)
|
||||
|
@ -313,8 +310,9 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
|
|||
|
||||
uvd_v5_0_mc_resume(adev);
|
||||
|
||||
/* disable clock gating */
|
||||
WREG32(mmUVD_CGC_GATE, 0);
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
|
||||
uvd_v5_0_enable_mgcg(adev, true);
|
||||
|
||||
/* disable interupt */
|
||||
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
|
||||
|
@ -628,16 +626,12 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
|
||||
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
|
||||
{
|
||||
uint32_t data, data1, data2, suvd_flags;
|
||||
uint32_t data1, data3, suvd_flags;
|
||||
|
||||
data = RREG32(mmUVD_CGC_CTRL);
|
||||
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
|
||||
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
|
||||
|
||||
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
|
||||
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
|
||||
data3 = RREG32(mmUVD_CGC_GATE);
|
||||
|
||||
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
|
||||
UVD_SUVD_CGC_GATE__SIT_MASK |
|
||||
|
@ -645,6 +639,49 @@ static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
|
|||
UVD_SUVD_CGC_GATE__SCM_MASK |
|
||||
UVD_SUVD_CGC_GATE__SDB_MASK;
|
||||
|
||||
if (enable) {
|
||||
data3 |= (UVD_CGC_GATE__SYS_MASK |
|
||||
UVD_CGC_GATE__UDEC_MASK |
|
||||
UVD_CGC_GATE__MPEG2_MASK |
|
||||
UVD_CGC_GATE__RBC_MASK |
|
||||
UVD_CGC_GATE__LMI_MC_MASK |
|
||||
UVD_CGC_GATE__IDCT_MASK |
|
||||
UVD_CGC_GATE__MPRD_MASK |
|
||||
UVD_CGC_GATE__MPC_MASK |
|
||||
UVD_CGC_GATE__LBSI_MASK |
|
||||
UVD_CGC_GATE__LRBBM_MASK |
|
||||
UVD_CGC_GATE__UDEC_RE_MASK |
|
||||
UVD_CGC_GATE__UDEC_CM_MASK |
|
||||
UVD_CGC_GATE__UDEC_IT_MASK |
|
||||
UVD_CGC_GATE__UDEC_DB_MASK |
|
||||
UVD_CGC_GATE__UDEC_MP_MASK |
|
||||
UVD_CGC_GATE__WCB_MASK |
|
||||
UVD_CGC_GATE__VCPU_MASK |
|
||||
UVD_CGC_GATE__JPEG_MASK |
|
||||
UVD_CGC_GATE__SCPU_MASK);
|
||||
data3 &= ~UVD_CGC_GATE__REGS_MASK;
|
||||
data1 |= suvd_flags;
|
||||
} else {
|
||||
data3 = 0;
|
||||
data1 = 0;
|
||||
}
|
||||
|
||||
WREG32(mmUVD_SUVD_CGC_GATE, data1);
|
||||
WREG32(mmUVD_CGC_GATE, data3);
|
||||
}
|
||||
|
||||
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
|
||||
{
|
||||
uint32_t data, data2;
|
||||
|
||||
data = RREG32(mmUVD_CGC_CTRL);
|
||||
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
|
||||
|
||||
|
||||
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
|
||||
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
|
||||
|
||||
|
||||
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
|
||||
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
|
||||
(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
|
||||
|
@ -675,11 +712,8 @@ static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
|
|||
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
|
||||
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
|
||||
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
|
||||
data1 |= suvd_flags;
|
||||
|
||||
WREG32(mmUVD_CGC_CTRL, data);
|
||||
WREG32(mmUVD_CGC_GATE, 0);
|
||||
WREG32(mmUVD_SUVD_CGC_GATE, data1);
|
||||
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
|
||||
}
|
||||
|
||||
|
@ -724,18 +758,30 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
|
|||
}
|
||||
#endif
|
||||
|
||||
static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
|
||||
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
|
||||
bool enable)
|
||||
{
|
||||
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
|
||||
u32 orig, data;
|
||||
|
||||
if (enable)
|
||||
tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
|
||||
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
|
||||
else
|
||||
tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
|
||||
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
|
||||
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
|
||||
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
|
||||
data |= 0xfff;
|
||||
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
|
||||
|
||||
WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
|
||||
orig = data = RREG32(mmUVD_CGC_CTRL);
|
||||
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
|
||||
if (orig != data)
|
||||
WREG32(mmUVD_CGC_CTRL, data);
|
||||
} else {
|
||||
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
|
||||
data &= ~0xfff;
|
||||
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
|
||||
|
||||
orig = data = RREG32(mmUVD_CGC_CTRL);
|
||||
data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
|
||||
if (orig != data)
|
||||
WREG32(mmUVD_CGC_CTRL, data);
|
||||
}
|
||||
}
|
||||
|
||||
static int uvd_v5_0_set_clockgating_state(void *handle,
|
||||
|
@@ -745,8 +791,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;

uvd_v5_0_set_bypass_mode(adev, enable);

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

@@ -755,17 +799,18 @@ static int uvd_v5_0_set_clockgating_state(void *handle,

curstate = state;
if (enable) {
/* disable HW gating and enable Sw gating */
uvd_v5_0_set_sw_clock_gating(adev);
} else {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
return -EBUSY;
uvd_v5_0_enable_clock_gating(adev, true);

/* enable HW gates because UVD is idle */
/* uvd_v5_0_set_hw_clock_gating(adev); */
} else {
uvd_v5_0_enable_clock_gating(adev, false);
}

uvd_v5_0_set_sw_clock_gating(adev);
return 0;
}
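The handler above keeps a static curstate latch so that repeated requests for the same clockgating state return early, and it only flips the CGC gates on a real transition. A rough sketch of that state-latching pattern follows; the function and state names are illustrative, not the driver's API.

/* Sketch: skip redundant clockgating transitions by remembering the last
 * state that was applied. Names and states are illustrative. */
#include <stdio.h>

enum cg_state { CG_STATE_UNGATE, CG_STATE_GATE };

static void apply_gating(enum cg_state state)
{
	printf("applying %s\n", state == CG_STATE_GATE ? "gate" : "ungate");
}

static int set_clockgating_state(enum cg_state state)
{
	static int curstate = -1;

	if (curstate == (int)state)	/* same state as last time: nothing to do */
		return 0;

	curstate = state;
	apply_gating(state);
	return 0;
}

int main(void)
{
	set_clockgating_state(CG_STATE_GATE);	/* applies */
	set_clockgating_state(CG_STATE_GATE);	/* skipped */
	set_clockgating_state(CG_STATE_UNGATE);	/* applies */
	return 0;
}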
@ -151,6 +151,8 @@ static int uvd_v6_0_hw_init(void *handle)
|
|||
uint32_t tmp;
|
||||
int r;
|
||||
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
|
||||
r = uvd_v6_0_start(adev);
|
||||
if (r)
|
||||
goto done;
|
||||
|
@ -935,28 +937,12 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
|
|||
}
|
||||
#endif
|
||||
|
||||
static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
|
||||
{
|
||||
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
|
||||
|
||||
if (enable)
|
||||
tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
|
||||
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
|
||||
else
|
||||
tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
|
||||
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
|
||||
|
||||
WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
|
||||
}
|
||||
|
||||
static int uvd_v6_0_set_clockgating_state(void *handle,
|
||||
enum amd_clockgating_state state)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
|
||||
|
||||
uvd_v6_0_set_bypass_mode(adev, enable);
|
||||
|
||||
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
|
||||
return 0;
|
||||
|
||||
|
|
|
@@ -926,7 +926,8 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS;
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
@@ -936,12 +937,12 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_POLARIS11:
adev->cg_flags = 0;
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x5A;
break;
case CHIP_POLARIS10:
adev->cg_flags = 0;
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
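The vi_common_early_init hunks above advertise AMD_CG_SUPPORT_UVD_MGCG in cg_flags for Fiji and both Polaris parts; the UVD code earlier in this patch checks that bit before touching the MGCG registers. A compact sketch of the advertise-then-check split follows; the flag values, chip matching and structure are placeholders, not the driver's actual data.

/* Sketch: per-ASIC capability flags set at early init and checked later
 * before a feature is enabled. Values and chip handling are illustrative. */
#include <stdio.h>
#include <stdbool.h>

#define AMD_CG_SUPPORT_UVD_MGCG (1u << 0)
#define AMD_CG_SUPPORT_MC_LS    (1u << 1)

struct device_ctx { unsigned int cg_flags; };

static void early_init(struct device_ctx *adev, bool has_uvd_mgcg)
{
	adev->cg_flags = AMD_CG_SUPPORT_MC_LS;
	if (has_uvd_mgcg)			/* stand-in for the FIJI/POLARIS cases */
		adev->cg_flags |= AMD_CG_SUPPORT_UVD_MGCG;
}

static void uvd_set_clockgating(struct device_ctx *adev, bool gate)
{
	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return;				/* ASIC did not opt in: leave gating alone */
	printf("UVD MGCG %s\n", gate ? "gated" : "ungated");
}

int main(void)
{
	struct device_ctx adev;

	early_init(&adev, true);
	uvd_set_clockgating(&adev, true);
	return 0;
}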
@ -0,0 +1,661 @@
|
|||
/*
|
||||
*
|
||||
* Copyright (C) 2016 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
|
||||
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef BIF_3_0_D_H
|
||||
#define BIF_3_0_D_H
|
||||
|
||||
#define ixPB0_DFT_DEBUG_CTRL_REG0 0x1300C
|
||||
#define ixPB0_DFT_JIT_INJ_REG0 0x13000
|
||||
#define ixPB0_DFT_JIT_INJ_REG1 0x13004
|
||||
#define ixPB0_DFT_JIT_INJ_REG2 0x13008
|
||||
#define ixPB0_GLB_CTRL_REG0 0x10004
|
||||
#define ixPB0_GLB_CTRL_REG1 0x10008
|
||||
#define ixPB0_GLB_CTRL_REG2 0x1000C
|
||||
#define ixPB0_GLB_CTRL_REG3 0x10010
|
||||
#define ixPB0_GLB_CTRL_REG4 0x10014
|
||||
#define ixPB0_GLB_CTRL_REG5 0x10018
|
||||
#define ixPB0_GLB_OVRD_REG0 0x10030
|
||||
#define ixPB0_GLB_OVRD_REG1 0x10034
|
||||
#define ixPB0_GLB_OVRD_REG2 0x10038
|
||||
#define ixPB0_GLB_SCI_STAT_OVRD_REG0 0x1001C
|
||||
#define ixPB0_GLB_SCI_STAT_OVRD_REG1 0x10020
|
||||
#define ixPB0_GLB_SCI_STAT_OVRD_REG2 0x10024
|
||||
#define ixPB0_GLB_SCI_STAT_OVRD_REG3 0x10028
|
||||
#define ixPB0_GLB_SCI_STAT_OVRD_REG4 0x1002C
|
||||
#define ixPB0_HW_DEBUG 0x12004
|
||||
#define ixPB0_PIF_CNTL 0x0010
|
||||
#define ixPB0_PIF_CNTL2 0x0014
|
||||
#define ixPB0_PIF_HW_DEBUG 0x0002
|
||||
#define ixPB0_PIF_PAIRING 0x0011
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_0 0x0020
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_10 0x0032
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_1 0x0021
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_11 0x0033
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_12 0x0034
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_13 0x0035
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_14 0x0036
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_15 0x0037
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_2 0x0022
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_3 0x0023
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_4 0x0024
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_5 0x0025
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_6 0x0026
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_7 0x0027
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_8 0x0030
|
||||
#define ixPB0_PIF_PDNB_OVERRIDE_9 0x0031
|
||||
#define ixPB0_PIF_PWRDOWN_0 0x0012
|
||||
#define ixPB0_PIF_PWRDOWN_1 0x0013
|
||||
#define ixPB0_PIF_PWRDOWN_2 0x0017
|
||||
#define ixPB0_PIF_PWRDOWN_3 0x0018
|
||||
#define ixPB0_PIF_SC_CTL 0x0016
|
||||
#define ixPB0_PIF_SCRATCH 0x0001
|
||||
#define ixPB0_PIF_SEQ_STATUS_0 0x0028
|
||||
#define ixPB0_PIF_SEQ_STATUS_10 0x003A
|
||||
#define ixPB0_PIF_SEQ_STATUS_1 0x0029
|
||||
#define ixPB0_PIF_SEQ_STATUS_11 0x003B
|
||||
#define ixPB0_PIF_SEQ_STATUS_12 0x003C
|
||||
#define ixPB0_PIF_SEQ_STATUS_13 0x003D
|
||||
#define ixPB0_PIF_SEQ_STATUS_14 0x003E
|
||||
#define ixPB0_PIF_SEQ_STATUS_15 0x003F
|
||||
#define ixPB0_PIF_SEQ_STATUS_2 0x002A
|
||||
#define ixPB0_PIF_SEQ_STATUS_3 0x002B
|
||||
#define ixPB0_PIF_SEQ_STATUS_4 0x002C
|
||||
#define ixPB0_PIF_SEQ_STATUS_5 0x002D
|
||||
#define ixPB0_PIF_SEQ_STATUS_6 0x002E
|
||||
#define ixPB0_PIF_SEQ_STATUS_7 0x002F
|
||||
#define ixPB0_PIF_SEQ_STATUS_8 0x0038
|
||||
#define ixPB0_PIF_SEQ_STATUS_9 0x0039
|
||||
#define ixPB0_PIF_TXPHYSTATUS 0x0015
|
||||
#define ixPB0_PLL_LC0_CTRL_REG0 0x14480
|
||||
#define ixPB0_PLL_LC0_OVRD_REG0 0x14490
|
||||
#define ixPB0_PLL_LC0_OVRD_REG1 0x14494
|
||||
#define ixPB0_PLL_LC0_SCI_STAT_OVRD_REG0 0x14500
|
||||
#define ixPB0_PLL_LC1_SCI_STAT_OVRD_REG0 0x14504
|
||||
#define ixPB0_PLL_LC2_SCI_STAT_OVRD_REG0 0x14508
|
||||
#define ixPB0_PLL_LC3_SCI_STAT_OVRD_REG0 0x1450C
|
||||
#define ixPB0_PLL_RO0_CTRL_REG0 0x14440
|
||||
#define ixPB0_PLL_RO0_OVRD_REG0 0x14450
|
||||
#define ixPB0_PLL_RO0_OVRD_REG1 0x14454
|
||||
#define ixPB0_PLL_RO0_SCI_STAT_OVRD_REG0 0x14460
|
||||
#define ixPB0_PLL_RO1_SCI_STAT_OVRD_REG0 0x14464
|
||||
#define ixPB0_PLL_RO2_SCI_STAT_OVRD_REG0 0x14468
|
||||
#define ixPB0_PLL_RO3_SCI_STAT_OVRD_REG0 0x1446C
|
||||
#define ixPB0_PLL_RO_GLB_CTRL_REG0 0x14000
|
||||
#define ixPB0_PLL_RO_GLB_OVRD_REG0 0x14010
|
||||
#define ixPB0_RX_GLB_CTRL_REG0 0x16000
|
||||
#define ixPB0_RX_GLB_CTRL_REG1 0x16004
|
||||
#define ixPB0_RX_GLB_CTRL_REG2 0x16008
|
||||
#define ixPB0_RX_GLB_CTRL_REG3 0x1600C
|
||||
#define ixPB0_RX_GLB_CTRL_REG4 0x16010
|
||||
#define ixPB0_RX_GLB_CTRL_REG5 0x16014
|
||||
#define ixPB0_RX_GLB_CTRL_REG6 0x16018
|
||||
#define ixPB0_RX_GLB_CTRL_REG7 0x1601C
|
||||
#define ixPB0_RX_GLB_CTRL_REG8 0x16020
|
||||
#define ixPB0_RX_GLB_OVRD_REG0 0x16030
|
||||
#define ixPB0_RX_GLB_OVRD_REG1 0x16034
|
||||
#define ixPB0_RX_GLB_SCI_STAT_OVRD_REG0 0x16028
|
||||
#define ixPB0_RX_LANE0_CTRL_REG0 0x16440
|
||||
#define ixPB0_RX_LANE0_SCI_STAT_OVRD_REG0 0x16448
|
||||
#define ixPB0_RX_LANE10_CTRL_REG0 0x17500
|
||||
#define ixPB0_RX_LANE10_SCI_STAT_OVRD_REG0 0x17508
|
||||
#define ixPB0_RX_LANE11_CTRL_REG0 0x17600
|
||||
#define ixPB0_RX_LANE11_SCI_STAT_OVRD_REG0 0x17608
|
||||
#define ixPB0_RX_LANE12_CTRL_REG0 0x17840
|
||||
#define ixPB0_RX_LANE12_SCI_STAT_OVRD_REG0 0x17848
|
||||
#define ixPB0_RX_LANE13_CTRL_REG0 0x17880
|
||||
#define ixPB0_RX_LANE13_SCI_STAT_OVRD_REG0 0x17888
|
||||
#define ixPB0_RX_LANE14_CTRL_REG0 0x17900
|
||||
#define ixPB0_RX_LANE14_SCI_STAT_OVRD_REG0 0x17908
|
||||
#define ixPB0_RX_LANE15_CTRL_REG0 0x17A00
|
||||
#define ixPB0_RX_LANE15_SCI_STAT_OVRD_REG0 0x17A08
|
||||
#define ixPB0_RX_LANE1_CTRL_REG0 0x16480
|
||||
#define ixPB0_RX_LANE1_SCI_STAT_OVRD_REG0 0x16488
|
||||
#define ixPB0_RX_LANE2_CTRL_REG0 0x16500
|
||||
#define ixPB0_RX_LANE2_SCI_STAT_OVRD_REG0 0x16508
|
||||
#define ixPB0_RX_LANE3_CTRL_REG0 0x16600
|
||||
#define ixPB0_RX_LANE3_SCI_STAT_OVRD_REG0 0x16608
|
||||
#define ixPB0_RX_LANE4_CTRL_REG0 0x16800
|
||||
#define ixPB0_RX_LANE4_SCI_STAT_OVRD_REG0 0x16848
|
||||
#define ixPB0_RX_LANE5_CTRL_REG0 0x16880
|
||||
#define ixPB0_RX_LANE5_SCI_STAT_OVRD_REG0 0x16888
|
||||
#define ixPB0_RX_LANE6_CTRL_REG0 0x16900
|
||||
#define ixPB0_RX_LANE6_SCI_STAT_OVRD_REG0 0x16908
|
||||
#define ixPB0_RX_LANE7_CTRL_REG0 0x16A00
|
||||
#define ixPB0_RX_LANE7_SCI_STAT_OVRD_REG0 0x16A08
|
||||
#define ixPB0_RX_LANE8_CTRL_REG0 0x17440
|
||||
#define ixPB0_RX_LANE8_SCI_STAT_OVRD_REG0 0x17448
|
||||
#define ixPB0_RX_LANE9_CTRL_REG0 0x17480
|
||||
#define ixPB0_RX_LANE9_SCI_STAT_OVRD_REG0 0x17488
|
||||
#define ixPB0_STRAP_GLB_REG0 0x12020
|
||||
#define ixPB0_STRAP_PLL_REG0 0x12030
|
||||
#define ixPB0_STRAP_RX_REG0 0x12028
|
||||
#define ixPB0_STRAP_RX_REG1 0x1202C
|
||||
#define ixPB0_STRAP_TX_REG0 0x12024
|
||||
#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG0 0x18014
|
||||
#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG1 0x18018
|
||||
#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG2 0x1801C
|
||||
#define ixPB0_TX_GLB_COEFF_ACCEPT_TABLE_REG3 0x18020
|
||||
#define ixPB0_TX_GLB_CTRL_REG0 0x18000
|
||||
#define ixPB0_TX_GLB_LANE_SKEW_CTRL 0x18004
|
||||
#define ixPB0_TX_GLB_OVRD_REG0 0x18030
|
||||
#define ixPB0_TX_GLB_OVRD_REG1 0x18034
|
||||
#define ixPB0_TX_GLB_OVRD_REG2 0x18038
|
||||
#define ixPB0_TX_GLB_OVRD_REG3 0x1803C
|
||||
#define ixPB0_TX_GLB_OVRD_REG4 0x18040
|
||||
#define ixPB0_TX_GLB_SCI_STAT_OVRD_REG0 0x18010
|
||||
#define ixPB0_TX_LANE0_CTRL_REG0 0x18440
|
||||
#define ixPB0_TX_LANE0_OVRD_REG0 0x18444
|
||||
#define ixPB0_TX_LANE0_SCI_STAT_OVRD_REG0 0x18448
|
||||
#define ixPB0_TX_LANE10_CTRL_REG0 0x19500
|
||||
#define ixPB0_TX_LANE10_OVRD_REG0 0x19504
|
||||
#define ixPB0_TX_LANE10_SCI_STAT_OVRD_REG0 0x19508
|
||||
#define ixPB0_TX_LANE11_CTRL_REG0 0x19600
|
||||
#define ixPB0_TX_LANE11_OVRD_REG0 0x19604
|
||||
#define ixPB0_TX_LANE11_SCI_STAT_OVRD_REG0 0x19608
|
||||
#define ixPB0_TX_LANE12_CTRL_REG0 0x19840
|
||||
#define ixPB0_TX_LANE12_OVRD_REG0 0x19844
|
||||
#define ixPB0_TX_LANE12_SCI_STAT_OVRD_REG0 0x19848
|
||||
#define ixPB0_TX_LANE13_CTRL_REG0 0x19880
|
||||
#define ixPB0_TX_LANE13_OVRD_REG0 0x19884
|
||||
#define ixPB0_TX_LANE13_SCI_STAT_OVRD_REG0 0x19888
|
||||
#define ixPB0_TX_LANE14_CTRL_REG0 0x19900
|
||||
#define ixPB0_TX_LANE14_OVRD_REG0 0x19904
|
||||
#define ixPB0_TX_LANE14_SCI_STAT_OVRD_REG0 0x19908
|
||||
#define ixPB0_TX_LANE15_CTRL_REG0 0x19A00
|
||||
#define ixPB0_TX_LANE15_OVRD_REG0 0x19A04
|
||||
#define ixPB0_TX_LANE15_SCI_STAT_OVRD_REG0 0x19A08
|
||||
#define ixPB0_TX_LANE1_CTRL_REG0 0x18480
|
||||
#define ixPB0_TX_LANE1_OVRD_REG0 0x18484
|
||||
#define ixPB0_TX_LANE1_SCI_STAT_OVRD_REG0 0x18488
|
||||
#define ixPB0_TX_LANE2_CTRL_REG0 0x18500
|
||||
#define ixPB0_TX_LANE2_OVRD_REG0 0x18504
|
||||
#define ixPB0_TX_LANE2_SCI_STAT_OVRD_REG0 0x18508
|
||||
#define ixPB0_TX_LANE3_CTRL_REG0 0x18600
|
||||
#define ixPB0_TX_LANE3_OVRD_REG0 0x18604
|
||||
#define ixPB0_TX_LANE3_SCI_STAT_OVRD_REG0 0x18608
|
||||
#define ixPB0_TX_LANE4_CTRL_REG0 0x18840
|
||||
#define ixPB0_TX_LANE4_OVRD_REG0 0x18844
|
||||
#define ixPB0_TX_LANE4_SCI_STAT_OVRD_REG0 0x18848
|
||||
#define ixPB0_TX_LANE5_CTRL_REG0 0x18880
|
||||
#define ixPB0_TX_LANE5_OVRD_REG0 0x18884
|
||||
#define ixPB0_TX_LANE5_SCI_STAT_OVRD_REG0 0x18888
|
||||
#define ixPB0_TX_LANE6_CTRL_REG0 0x18900
|
||||
#define ixPB0_TX_LANE6_OVRD_REG0 0x18904
|
||||
#define ixPB0_TX_LANE6_SCI_STAT_OVRD_REG0 0x18908
|
||||
#define ixPB0_TX_LANE7_CTRL_REG0 0x18A00
|
||||
#define ixPB0_TX_LANE7_OVRD_REG0 0x18A04
|
||||
#define ixPB0_TX_LANE7_SCI_STAT_OVRD_REG0 0x18A08
|
||||
#define ixPB0_TX_LANE8_CTRL_REG0 0x19440
|
||||
#define ixPB0_TX_LANE8_OVRD_REG0 0x19444
|
||||
#define ixPB0_TX_LANE8_SCI_STAT_OVRD_REG0 0x19448
|
||||
#define ixPB0_TX_LANE9_CTRL_REG0 0x19480
|
||||
#define ixPB0_TX_LANE9_OVRD_REG0 0x19484
|
||||
#define ixPB0_TX_LANE9_SCI_STAT_OVRD_REG0 0x19488
|
||||
#define ixPB1_DFT_DEBUG_CTRL_REG0 0x1300C
|
||||
#define ixPB1_DFT_JIT_INJ_REG0 0x13000
|
||||
#define ixPB1_DFT_JIT_INJ_REG1 0x13004
|
||||
#define ixPB1_DFT_JIT_INJ_REG2 0x13008
|
||||
#define ixPB1_GLB_CTRL_REG0 0x10004
|
||||
#define ixPB1_GLB_CTRL_REG1 0x10008
|
||||
#define ixPB1_GLB_CTRL_REG2 0x1000C
|
||||
#define ixPB1_GLB_CTRL_REG3 0x10010
|
||||
#define ixPB1_GLB_CTRL_REG4 0x10014
|
||||
#define ixPB1_GLB_CTRL_REG5 0x10018
|
||||
#define ixPB1_GLB_OVRD_REG0 0x10030
|
||||
#define ixPB1_GLB_OVRD_REG1 0x10034
|
||||
#define ixPB1_GLB_OVRD_REG2 0x10038
|
||||
#define ixPB1_GLB_SCI_STAT_OVRD_REG0 0x1001C
|
||||
#define ixPB1_GLB_SCI_STAT_OVRD_REG1 0x10020
|
||||
#define ixPB1_GLB_SCI_STAT_OVRD_REG2 0x10024
|
||||
#define ixPB1_GLB_SCI_STAT_OVRD_REG3 0x10028
|
||||
#define ixPB1_GLB_SCI_STAT_OVRD_REG4 0x1002C
|
||||
#define ixPB1_HW_DEBUG 0x12004
|
||||
#define ixPB1_PIF_CNTL 0x0010
|
||||
#define ixPB1_PIF_CNTL2 0x0014
|
||||
#define ixPB1_PIF_HW_DEBUG 0x0002
|
||||
#define ixPB1_PIF_PAIRING 0x0011
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_0 0x0020
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_10 0x0032
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_1 0x0021
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_11 0x0033
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_12 0x0034
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_13 0x0035
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_14 0x0036
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_15 0x0037
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_2 0x0022
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_3 0x0023
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_4 0x0024
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_5 0x0025
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_6 0x0026
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_7 0x0027
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_8 0x0030
|
||||
#define ixPB1_PIF_PDNB_OVERRIDE_9 0x0031
|
||||
#define ixPB1_PIF_PWRDOWN_0 0x0012
|
||||
#define ixPB1_PIF_PWRDOWN_1 0x0013
|
||||
#define ixPB1_PIF_PWRDOWN_2 0x0017
|
||||
#define ixPB1_PIF_PWRDOWN_3 0x0018
|
||||
#define ixPB1_PIF_SC_CTL 0x0016
|
||||
#define ixPB1_PIF_SCRATCH 0x0001
|
||||
#define ixPB1_PIF_SEQ_STATUS_0 0x0028
|
||||
#define ixPB1_PIF_SEQ_STATUS_10 0x003A
|
||||
#define ixPB1_PIF_SEQ_STATUS_1 0x0029
|
||||
#define ixPB1_PIF_SEQ_STATUS_11 0x003B
|
||||
#define ixPB1_PIF_SEQ_STATUS_12 0x003C
|
||||
#define ixPB1_PIF_SEQ_STATUS_13 0x003D
|
||||
#define ixPB1_PIF_SEQ_STATUS_14 0x003E
|
||||
#define ixPB1_PIF_SEQ_STATUS_15 0x003F
|
||||
#define ixPB1_PIF_SEQ_STATUS_2 0x002A
|
||||
#define ixPB1_PIF_SEQ_STATUS_3 0x002B
|
||||
#define ixPB1_PIF_SEQ_STATUS_4 0x002C
|
||||
#define ixPB1_PIF_SEQ_STATUS_5 0x002D
|
||||
#define ixPB1_PIF_SEQ_STATUS_6 0x002E
|
||||
#define ixPB1_PIF_SEQ_STATUS_7 0x002F
|
||||
#define ixPB1_PIF_SEQ_STATUS_8 0x0038
|
||||
#define ixPB1_PIF_SEQ_STATUS_9 0x0039
|
||||
#define ixPB1_PIF_TXPHYSTATUS 0x0015
|
||||
#define ixPB1_PLL_LC0_CTRL_REG0 0x14480
|
||||
#define ixPB1_PLL_LC0_OVRD_REG0 0x14490
|
||||
#define ixPB1_PLL_LC0_OVRD_REG1 0x14494
|
||||
#define ixPB1_PLL_LC0_SCI_STAT_OVRD_REG0 0x14500
|
||||
#define ixPB1_PLL_LC1_SCI_STAT_OVRD_REG0 0x14504
|
||||
#define ixPB1_PLL_LC2_SCI_STAT_OVRD_REG0 0x14508
|
||||
#define ixPB1_PLL_LC3_SCI_STAT_OVRD_REG0 0x1450C
|
||||
#define ixPB1_PLL_RO0_CTRL_REG0 0x14440
|
||||
#define ixPB1_PLL_RO0_OVRD_REG0 0x14450
|
||||
#define ixPB1_PLL_RO0_OVRD_REG1 0x14454
|
||||
#define ixPB1_PLL_RO0_SCI_STAT_OVRD_REG0 0x14460
|
||||
#define ixPB1_PLL_RO1_SCI_STAT_OVRD_REG0 0x14464
|
||||
#define ixPB1_PLL_RO2_SCI_STAT_OVRD_REG0 0x14468
|
||||
#define ixPB1_PLL_RO3_SCI_STAT_OVRD_REG0 0x1446C
|
||||
#define ixPB1_PLL_RO_GLB_CTRL_REG0 0x14000
|
||||
#define ixPB1_PLL_RO_GLB_OVRD_REG0 0x14010
|
||||
#define ixPB1_RX_GLB_CTRL_REG0 0x16000
|
||||
#define ixPB1_RX_GLB_CTRL_REG1 0x16004
|
||||
#define ixPB1_RX_GLB_CTRL_REG2 0x16008
|
||||
#define ixPB1_RX_GLB_CTRL_REG3 0x1600C
|
||||
#define ixPB1_RX_GLB_CTRL_REG4 0x16010
|
||||
#define ixPB1_RX_GLB_CTRL_REG5 0x16014
|
||||
#define ixPB1_RX_GLB_CTRL_REG6 0x16018
|
||||
#define ixPB1_RX_GLB_CTRL_REG7 0x1601C
|
||||
#define ixPB1_RX_GLB_CTRL_REG8 0x16020
|
||||
#define ixPB1_RX_GLB_OVRD_REG0 0x16030
|
||||
#define ixPB1_RX_GLB_OVRD_REG1 0x16034
|
||||
#define ixPB1_RX_GLB_SCI_STAT_OVRD_REG0 0x16028
|
||||
#define ixPB1_RX_LANE0_CTRL_REG0 0x16440
|
||||
#define ixPB1_RX_LANE0_SCI_STAT_OVRD_REG0 0x16448
|
||||
#define ixPB1_RX_LANE10_CTRL_REG0 0x17500
|
||||
#define ixPB1_RX_LANE10_SCI_STAT_OVRD_REG0 0x17508
|
||||
#define ixPB1_RX_LANE11_CTRL_REG0 0x17600
|
||||
#define ixPB1_RX_LANE11_SCI_STAT_OVRD_REG0 0x17608
|
||||
#define ixPB1_RX_LANE12_CTRL_REG0 0x17840
|
||||
#define ixPB1_RX_LANE12_SCI_STAT_OVRD_REG0 0x17848
|
||||
#define ixPB1_RX_LANE13_CTRL_REG0 0x17880
|
||||
#define ixPB1_RX_LANE13_SCI_STAT_OVRD_REG0 0x17888
|
||||
#define ixPB1_RX_LANE14_CTRL_REG0 0x17900
|
||||
#define ixPB1_RX_LANE14_SCI_STAT_OVRD_REG0 0x17908
|
||||
#define ixPB1_RX_LANE15_CTRL_REG0 0x17A00
|
||||
#define ixPB1_RX_LANE15_SCI_STAT_OVRD_REG0 0x17A08
|
||||
#define ixPB1_RX_LANE1_CTRL_REG0 0x16480
|
||||
#define ixPB1_RX_LANE1_SCI_STAT_OVRD_REG0 0x16488
|
||||
#define ixPB1_RX_LANE2_CTRL_REG0 0x16500
|
||||
#define ixPB1_RX_LANE2_SCI_STAT_OVRD_REG0 0x16508
|
||||
#define ixPB1_RX_LANE3_CTRL_REG0 0x16600
|
||||
#define ixPB1_RX_LANE3_SCI_STAT_OVRD_REG0 0x16608
|
||||
#define ixPB1_RX_LANE4_CTRL_REG0 0x16800
|
||||
#define ixPB1_RX_LANE4_SCI_STAT_OVRD_REG0 0x16848
|
||||
#define ixPB1_RX_LANE5_CTRL_REG0 0x16880
|
||||
#define ixPB1_RX_LANE5_SCI_STAT_OVRD_REG0 0x16888
|
||||
#define ixPB1_RX_LANE6_CTRL_REG0 0x16900
|
||||
#define ixPB1_RX_LANE6_SCI_STAT_OVRD_REG0 0x16908
|
||||
#define ixPB1_RX_LANE7_CTRL_REG0 0x16A00
|
||||
#define ixPB1_RX_LANE7_SCI_STAT_OVRD_REG0 0x16A08
|
||||
#define ixPB1_RX_LANE8_CTRL_REG0 0x17440
|
||||
#define ixPB1_RX_LANE8_SCI_STAT_OVRD_REG0 0x17448
|
||||
#define ixPB1_RX_LANE9_CTRL_REG0 0x17480
|
||||
#define ixPB1_RX_LANE9_SCI_STAT_OVRD_REG0 0x17488
|
||||
#define ixPB1_STRAP_GLB_REG0 0x12020
|
||||
#define ixPB1_STRAP_PLL_REG0 0x12030
|
||||
#define ixPB1_STRAP_RX_REG0 0x12028
|
||||
#define ixPB1_STRAP_RX_REG1 0x1202C
|
||||
#define ixPB1_STRAP_TX_REG0 0x12024
|
||||
#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG0 0x18014
|
||||
#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG1 0x18018
|
||||
#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG2 0x1801C
|
||||
#define ixPB1_TX_GLB_COEFF_ACCEPT_TABLE_REG3 0x18020
|
||||
#define ixPB1_TX_GLB_CTRL_REG0 0x18000
|
||||
#define ixPB1_TX_GLB_LANE_SKEW_CTRL 0x18004
|
||||
#define ixPB1_TX_GLB_OVRD_REG0 0x18030
|
||||
#define ixPB1_TX_GLB_OVRD_REG1 0x18034
|
||||
#define ixPB1_TX_GLB_OVRD_REG2 0x18038
|
||||
#define ixPB1_TX_GLB_OVRD_REG3 0x1803C
|
||||
#define ixPB1_TX_GLB_OVRD_REG4 0x18040
|
||||
#define ixPB1_TX_GLB_SCI_STAT_OVRD_REG0 0x18010
|
||||
#define ixPB1_TX_LANE0_CTRL_REG0 0x18440
|
||||
#define ixPB1_TX_LANE0_OVRD_REG0 0x18444
|
||||
#define ixPB1_TX_LANE0_SCI_STAT_OVRD_REG0 0x18448
|
||||
#define ixPB1_TX_LANE10_CTRL_REG0 0x19500
|
||||
#define ixPB1_TX_LANE10_OVRD_REG0 0x19504
|
||||
#define ixPB1_TX_LANE10_SCI_STAT_OVRD_REG0 0x19508
|
||||
#define ixPB1_TX_LANE11_CTRL_REG0 0x19600
|
||||
#define ixPB1_TX_LANE11_OVRD_REG0 0x19604
|
||||
#define ixPB1_TX_LANE11_SCI_STAT_OVRD_REG0 0x19608
|
||||
#define ixPB1_TX_LANE12_CTRL_REG0 0x19840
|
||||
#define ixPB1_TX_LANE12_OVRD_REG0 0x19844
|
||||
#define ixPB1_TX_LANE12_SCI_STAT_OVRD_REG0 0x19848
|
||||
#define ixPB1_TX_LANE13_CTRL_REG0 0x19880
|
||||
#define ixPB1_TX_LANE13_OVRD_REG0 0x19884
|
||||
#define ixPB1_TX_LANE13_SCI_STAT_OVRD_REG0 0x19888
|
||||
#define ixPB1_TX_LANE14_CTRL_REG0 0x19900
|
||||
#define ixPB1_TX_LANE14_OVRD_REG0 0x19904
|
||||
#define ixPB1_TX_LANE14_SCI_STAT_OVRD_REG0 0x19908
|
||||
#define ixPB1_TX_LANE15_CTRL_REG0 0x19A00
|
||||
#define ixPB1_TX_LANE15_OVRD_REG0 0x19A04
|
||||
#define ixPB1_TX_LANE15_SCI_STAT_OVRD_REG0 0x19A08
|
||||
#define ixPB1_TX_LANE1_CTRL_REG0 0x18480
|
||||
#define ixPB1_TX_LANE1_OVRD_REG0 0x18484
|
||||
#define ixPB1_TX_LANE1_SCI_STAT_OVRD_REG0 0x18488
|
||||
#define ixPB1_TX_LANE2_CTRL_REG0 0x18500
|
||||
#define ixPB1_TX_LANE2_OVRD_REG0 0x18504
|
||||
#define ixPB1_TX_LANE2_SCI_STAT_OVRD_REG0 0x18508
|
||||
#define ixPB1_TX_LANE3_CTRL_REG0 0x18600
|
||||
#define ixPB1_TX_LANE3_OVRD_REG0 0x18604
|
||||
#define ixPB1_TX_LANE3_SCI_STAT_OVRD_REG0 0x18608
|
||||
#define ixPB1_TX_LANE4_CTRL_REG0 0x18840
|
||||
#define ixPB1_TX_LANE4_OVRD_REG0 0x18844
|
||||
#define ixPB1_TX_LANE4_SCI_STAT_OVRD_REG0 0x18848
|
||||
#define ixPB1_TX_LANE5_CTRL_REG0 0x18880
|
||||
#define ixPB1_TX_LANE5_OVRD_REG0 0x18884
|
||||
#define ixPB1_TX_LANE5_SCI_STAT_OVRD_REG0 0x18888
|
||||
#define ixPB1_TX_LANE6_CTRL_REG0 0x18900
|
||||
#define ixPB1_TX_LANE6_OVRD_REG0 0x18904
|
||||
#define ixPB1_TX_LANE6_SCI_STAT_OVRD_REG0 0x18908
|
||||
#define ixPB1_TX_LANE7_CTRL_REG0 0x18A00
|
||||
#define ixPB1_TX_LANE7_OVRD_REG0 0x18A04
|
||||
#define ixPB1_TX_LANE7_SCI_STAT_OVRD_REG0 0x18A08
|
||||
#define ixPB1_TX_LANE8_CTRL_REG0 0x19440
|
||||
#define ixPB1_TX_LANE8_OVRD_REG0 0x19444
|
||||
#define ixPB1_TX_LANE8_SCI_STAT_OVRD_REG0 0x19448
|
||||
#define ixPB1_TX_LANE9_CTRL_REG0 0x19480
|
||||
#define ixPB1_TX_LANE9_OVRD_REG0 0x19484
|
||||
#define ixPB1_TX_LANE9_SCI_STAT_OVRD_REG0 0x19488
|
||||
#define ixPCIE_BUS_CNTL 0x0021
|
||||
#define ixPCIE_CFG_CNTL 0x003C
|
||||
#define ixPCIE_CI_CNTL 0x0020
|
||||
#define ixPCIE_CNTL 0x0010
|
||||
#define ixPCIE_CNTL2 0x001C
|
||||
#define ixPCIE_CONFIG_CNTL 0x0011
|
||||
#define ixPCIE_DEBUG_CNTL 0x0012
|
||||
#define ixPCIE_ERR_CNTL 0x006A
|
||||
#define ixPCIE_F0_DPA_CAP 0x00E0
|
||||
#define ixPCIE_F0_DPA_CNTL 0x00E5
|
||||
#define ixPCIE_F0_DPA_LATENCY_INDICATOR 0x00E4
|
||||
#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x00E7
|
||||
#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x00E8
|
||||
#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x00E9
|
||||
#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x00EA
|
||||
#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x00EB
|
||||
#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x00EC
|
||||
#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x00ED
|
||||
#define ixPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x00EE
|
||||
#define ixPCIE_FC_CPL 0x0062
|
||||
#define ixPCIE_FC_NP 0x0061
|
||||
#define ixPCIE_FC_P 0x0060
|
||||
#define ixPCIE_HW_DEBUG 0x0002
|
||||
#define ixPCIE_I2C_REG_ADDR_EXPAND 0x003A
|
||||
#define ixPCIE_I2C_REG_DATA 0x003B
|
||||
#define ixPCIE_INT_CNTL 0x001A
|
||||
#define ixPCIE_INT_STATUS 0x001B
|
||||
#define ixPCIE_LC_BEST_EQ_SETTINGS 0x00B9
|
||||
#define ixPCIE_LC_BW_CHANGE_CNTL 0x00B2
|
||||
#define ixPCIE_LC_CDR_CNTL 0x00B3
|
||||
#define ixPCIE_LC_CNTL 0x00A0
|
||||
#define ixPCIE_LC_CNTL2 0x00B1
|
||||
#define ixPCIE_LC_CNTL3 0x00B5
|
||||
#define ixPCIE_LC_CNTL4 0x00B6
|
||||
#define ixPCIE_LC_CNTL5 0x00B7
|
||||
#define ixPCIE_LC_FORCE_COEFF 0x00B8
|
||||
#define ixPCIE_LC_FORCE_EQ_REQ_COEFF 0x00BA
|
||||
#define ixPCIE_LC_LANE_CNTL 0x00B4
|
||||
#define ixPCIE_LC_LINK_WIDTH_CNTL 0x00A2
|
||||
#define ixPCIE_LC_N_FTS_CNTL 0x00A3
|
||||
#define ixPCIE_LC_SPEED_CNTL 0x00A4
|
||||
#define ixPCIE_LC_STATE0 0x00A5
|
||||
#define ixPCIE_LC_STATE10 0x0026
|
||||
#define ixPCIE_LC_STATE1 0x00A6
|
||||
#define ixPCIE_LC_STATE11 0x0027
|
||||
#define ixPCIE_LC_STATE2 0x00A7
|
||||
#define ixPCIE_LC_STATE3 0x00A8
|
||||
#define ixPCIE_LC_STATE4 0x00A9
|
||||
#define ixPCIE_LC_STATE5 0x00AA
|
||||
#define ixPCIE_LC_STATE6 0x0022
|
||||
#define ixPCIE_LC_STATE7 0x0023
|
||||
#define ixPCIE_LC_STATE8 0x0024
|
||||
#define ixPCIE_LC_STATE9 0x0025
|
||||
#define ixPCIE_LC_STATUS1 0x0028
|
||||
#define ixPCIE_LC_STATUS2 0x0029
|
||||
#define ixPCIE_LC_TRAINING_CNTL 0x00A1
|
||||
#define ixPCIE_P_BUF_STATUS 0x0041
|
||||
#define ixPCIE_P_CNTL 0x0040
|
||||
#define ixPCIE_P_DECODER_STATUS 0x0042
|
||||
#define ixPCIE_PERF_CNTL_EVENT0_PORT_SEL 0x0093
|
||||
#define ixPCIE_PERF_CNTL_EVENT1_PORT_SEL 0x0094
|
||||
#define ixPCIE_PERF_CNTL_MST_C_CLK 0x0087
|
||||
#define ixPCIE_PERF_CNTL_MST_R_CLK 0x0084
|
||||
#define ixPCIE_PERF_CNTL_SLV_NS_C_CLK 0x0090
|
||||
#define ixPCIE_PERF_CNTL_SLV_R_CLK 0x008A
|
||||
#define ixPCIE_PERF_CNTL_SLV_S_C_CLK 0x008D
|
||||
#define ixPCIE_PERF_CNTL_TXCLK 0x0081
|
||||
#define ixPCIE_PERF_CNTL_TXCLK2 0x0095
|
||||
#define ixPCIE_PERF_COUNT0_MST_C_CLK 0x0088
|
||||
#define ixPCIE_PERF_COUNT0_MST_R_CLK 0x0085
|
||||
#define ixPCIE_PERF_COUNT0_SLV_NS_C_CLK 0x0091
|
||||
#define ixPCIE_PERF_COUNT0_SLV_R_CLK 0x008B
|
||||
#define ixPCIE_PERF_COUNT0_SLV_S_C_CLK 0x008E
|
||||
#define ixPCIE_PERF_COUNT0_TXCLK 0x0082
|
||||
#define ixPCIE_PERF_COUNT0_TXCLK2 0x0096
|
||||
#define ixPCIE_PERF_COUNT1_MST_C_CLK 0x0089
|
||||
#define ixPCIE_PERF_COUNT1_MST_R_CLK 0x0086
|
||||
#define ixPCIE_PERF_COUNT1_SLV_NS_C_CLK 0x0092
|
||||
#define ixPCIE_PERF_COUNT1_SLV_R_CLK 0x008C
|
||||
#define ixPCIE_PERF_COUNT1_SLV_S_C_CLK 0x008F
|
||||
#define ixPCIE_PERF_COUNT1_TXCLK 0x0083
|
||||
#define ixPCIE_PERF_COUNT1_TXCLK2 0x0097
|
||||
#define ixPCIE_PERF_COUNT_CNTL 0x0080
|
||||
#define ixPCIEP_HW_DEBUG 0x0002
|
||||
#define ixPCIE_P_MISC_STATUS 0x0043
|
||||
#define ixPCIEP_PORT_CNTL 0x0010
|
||||
#define ixPCIE_P_PORT_LANE_STATUS 0x0050
|
||||
#define ixPCIE_PRBS_CLR 0x00C8
|
||||
#define ixPCIE_PRBS_ERRCNT_0 0x00D0
|
||||
#define ixPCIE_PRBS_ERRCNT_10 0x00DA
|
||||
#define ixPCIE_PRBS_ERRCNT_1 0x00D1
|
||||
#define ixPCIE_PRBS_ERRCNT_11 0x00DB
|
||||
#define ixPCIE_PRBS_ERRCNT_12 0x00DC
|
||||
#define ixPCIE_PRBS_ERRCNT_13 0x00DD
|
||||
#define ixPCIE_PRBS_ERRCNT_14 0x00DE
|
||||
#define ixPCIE_PRBS_ERRCNT_15 0x00DF
|
||||
#define ixPCIE_PRBS_ERRCNT_2 0x00D2
|
||||
#define ixPCIE_PRBS_ERRCNT_3 0x00D3
|
||||
#define ixPCIE_PRBS_ERRCNT_4 0x00D4
|
||||
#define ixPCIE_PRBS_ERRCNT_5 0x00D5
|
||||
#define ixPCIE_PRBS_ERRCNT_6 0x00D6
|
||||
#define ixPCIE_PRBS_ERRCNT_7 0x00D7
|
||||
#define ixPCIE_PRBS_ERRCNT_8 0x00D8
|
||||
#define ixPCIE_PRBS_ERRCNT_9 0x00D9
|
||||
#define ixPCIE_PRBS_FREERUN 0x00CB
|
||||
#define ixPCIE_PRBS_HI_BITCNT 0x00CF
|
||||
#define ixPCIE_PRBS_LO_BITCNT 0x00CE
|
||||
#define ixPCIE_PRBS_MISC 0x00CC
|
||||
#define ixPCIE_PRBS_STATUS1 0x00C9
|
||||
#define ixPCIE_PRBS_STATUS2 0x00CA
|
||||
#define ixPCIE_PRBS_USER_PATTERN 0x00CD
|
||||
#define ixPCIE_P_RCV_L0S_FTS_DET 0x0050
|
||||
#define ixPCIEP_RESERVED 0x0000
|
||||
#define ixPCIEP_SCRATCH 0x0001
|
||||
#define ixPCIEP_STRAP_LC 0x00C0
|
||||
#define ixPCIEP_STRAP_MISC 0x00C1
|
||||
#define ixPCIE_RESERVED 0x0000
|
||||
#define ixPCIE_RX_CNTL 0x0070
|
||||
#define ixPCIE_RX_CNTL2 0x001D
|
||||
#define ixPCIE_RX_CNTL3 0x0074
|
||||
#define ixPCIE_RX_CREDITS_ALLOCATED_CPL 0x0082
|
||||
#define ixPCIE_RX_CREDITS_ALLOCATED_NP 0x0081
|
||||
#define ixPCIE_RX_CREDITS_ALLOCATED_P 0x0080
|
||||
#define ixPCIE_RX_EXPECTED_SEQNUM 0x0071
|
||||
#define ixPCIE_RX_LAST_TLP0 0x0031
|
||||
#define ixPCIE_RX_LAST_TLP1 0x0032
|
||||
#define ixPCIE_RX_LAST_TLP2 0x0033
|
||||
#define ixPCIE_RX_LAST_TLP3 0x0034
|
||||
#define ixPCIE_RX_NUM_NAK 0x000E
|
||||
#define ixPCIE_RX_NUM_NAK_GENERATED 0x000F
|
||||
#define ixPCIE_RX_VENDOR_SPECIFIC 0x0072
|
||||
#define ixPCIE_SCRATCH 0x0001
|
||||
#define ixPCIE_STRAP_F0 0x00B0
|
||||
#define ixPCIE_STRAP_F1 0x00B1
|
||||
#define ixPCIE_STRAP_F2 0x00B2
|
||||
#define ixPCIE_STRAP_F3 0x00B3
|
||||
#define ixPCIE_STRAP_F4 0x00B4
|
||||
#define ixPCIE_STRAP_F5 0x00B5
|
||||
#define ixPCIE_STRAP_F6 0x00B6
|
||||
#define ixPCIE_STRAP_F7 0x00B7
|
||||
#define ixPCIE_STRAP_I2C_BD 0x00C4
|
||||
#define ixPCIE_STRAP_MISC 0x00C0
|
||||
#define ixPCIE_STRAP_MISC2 0x00C1
|
||||
#define ixPCIE_STRAP_PI 0x00C2
|
||||
#define ixPCIE_TX_ACK_LATENCY_LIMIT 0x0026
|
||||
#define ixPCIE_TX_CNTL 0x0020
|
||||
#define ixPCIE_TX_CREDITS_ADVT_CPL 0x0032
|
||||
#define ixPCIE_TX_CREDITS_ADVT_NP 0x0031
|
||||
#define ixPCIE_TX_CREDITS_ADVT_P 0x0030
|
||||
#define ixPCIE_TX_CREDITS_FCU_THRESHOLD 0x0037
|
||||
#define ixPCIE_TX_CREDITS_INIT_CPL 0x0035
|
||||
#define ixPCIE_TX_CREDITS_INIT_NP 0x0034
|
||||
#define ixPCIE_TX_CREDITS_INIT_P 0x0033
|
||||
#define ixPCIE_TX_CREDITS_STATUS 0x0036
|
||||
#define ixPCIE_TX_LAST_TLP0 0x0035
|
||||
#define ixPCIE_TX_LAST_TLP1 0x0036
|
||||
#define ixPCIE_TX_LAST_TLP2 0x0037
|
||||
#define ixPCIE_TX_LAST_TLP3 0x0038
|
||||
#define ixPCIE_TX_REPLAY 0x0025
|
||||
#define ixPCIE_TX_REQUESTER_ID 0x0021
|
||||
#define ixPCIE_TX_REQUEST_NUM_CNTL 0x0023
|
||||
#define ixPCIE_TX_SEQ 0x0024
|
||||
#define ixPCIE_TX_VENDOR_SPECIFIC 0x0022
|
||||
#define ixPCIE_WPR_CNTL 0x0030
|
||||
#define mmBACO_CNTL 0x14E5
|
||||
#define mmBF_ANA_ISO_CNTL 0x14C7
|
||||
#define mmBIF_BACO_DEBUG 0x14DF
|
||||
#define mmBIF_BACO_DEBUG_LATCH 0x14DC
|
||||
#define mmBIF_BACO_MSIC 0x14DE
|
||||
#define mmBIF_BUSNUM_CNTL1 0x1525
|
||||
#define mmBIF_BUSNUM_CNTL2 0x152B
|
||||
#define mmBIF_BUSNUM_LIST0 0x1526
|
||||
#define mmBIF_BUSNUM_LIST1 0x1527
|
||||
#define mmBIF_BUSY_DELAY_CNTR 0x1529
|
||||
#define mmBIF_CLK_PDWN_DELAY_TIMER 0x151F
|
||||
#define mmBIF_DEBUG_CNTL 0x151C
|
||||
#define mmBIF_DEBUG_MUX 0x151D
|
||||
#define mmBIF_DEBUG_OUT 0x151E
|
||||
#define mmBIF_DEVFUNCNUM_LIST0 0x14E8
|
||||
#define mmBIF_DEVFUNCNUM_LIST1 0x14E7
|
||||
#define mmBIF_FB_EN 0x1524
|
||||
#define mmBIF_FEATURES_CONTROL_MISC 0x14C2
|
||||
#define mmBIF_PERFCOUNTER0_RESULT 0x152D
|
||||
#define mmBIF_PERFCOUNTER1_RESULT 0x152E
|
||||
#define mmBIF_PERFMON_CNTL 0x152C
|
||||
#define mmBIF_PIF_TXCLK_SWITCH_TIMER 0x152F
|
||||
#define mmBIF_RESET_EN 0x1511
|
||||
#define mmBIF_SCRATCH0 0x150E
|
||||
#define mmBIF_SCRATCH1 0x150F
|
||||
#define mmBIF_SSA_DISP_LOWER 0x14D2
|
||||
#define mmBIF_SSA_DISP_UPPER 0x14D3
|
||||
#define mmBIF_SSA_GFX0_LOWER 0x14CA
|
||||
#define mmBIF_SSA_GFX0_UPPER 0x14CB
|
||||
#define mmBIF_SSA_GFX1_LOWER 0x14CC
|
||||
#define mmBIF_SSA_GFX1_UPPER 0x14CD
|
||||
#define mmBIF_SSA_GFX2_LOWER 0x14CE
|
||||
#define mmBIF_SSA_GFX2_UPPER 0x14CF
|
||||
#define mmBIF_SSA_GFX3_LOWER 0x14D0
|
||||
#define mmBIF_SSA_GFX3_UPPER 0x14D1
|
||||
#define mmBIF_SSA_MC_LOWER 0x14D4
|
||||
#define mmBIF_SSA_MC_UPPER 0x14D5
|
||||
#define mmBIF_SSA_PWR_STATUS 0x14C8
|
||||
#define mmBIF_XDMA_HI 0x14C1
|
||||
#define mmBIF_XDMA_LO 0x14C0
|
||||
#define mmBIOS_SCRATCH_0 0x05C9
|
||||
#define mmBIOS_SCRATCH_10 0x05D3
|
||||
#define mmBIOS_SCRATCH_1 0x05CA
|
||||
#define mmBIOS_SCRATCH_11 0x05D4
|
||||
#define mmBIOS_SCRATCH_12 0x05D5
|
||||
#define mmBIOS_SCRATCH_13 0x05D6
|
||||
#define mmBIOS_SCRATCH_14 0x05D7
|
||||
#define mmBIOS_SCRATCH_15 0x05D8
|
||||
#define mmBIOS_SCRATCH_2 0x05CB
|
||||
#define mmBIOS_SCRATCH_3 0x05CC
|
||||
#define mmBIOS_SCRATCH_4 0x05CD
|
||||
#define mmBIOS_SCRATCH_5 0x05CE
|
||||
#define mmBIOS_SCRATCH_6 0x05CF
|
||||
#define mmBIOS_SCRATCH_7 0x05D0
|
||||
#define mmBIOS_SCRATCH_8 0x05D1
|
||||
#define mmBIOS_SCRATCH_9 0x05D2
|
||||
#define mmBUS_CNTL 0x1508
|
||||
#define mmCAPTURE_HOST_BUSNUM 0x153C
|
||||
#define mmCLKREQB_PAD_CNTL 0x1521
|
||||
#define mmCONFIG_APER_SIZE 0x150C
|
||||
#define mmCONFIG_CNTL 0x1509
|
||||
#define mmCONFIG_F0_BASE 0x150B
|
||||
#define mmCONFIG_MEMSIZE 0x150A
|
||||
#define mmCONFIG_REG_APER_SIZE 0x150D
|
||||
#define mmHDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
|
||||
#define mmHDP_REG_COHERENCY_FLUSH_CNTL 0x1528
|
||||
#define mmHOST_BUSNUM 0x153D
|
||||
#define mmHW_DEBUG 0x1515
|
||||
#define mmIMPCTL_RESET 0x14F5
|
||||
#define mmINTERRUPT_CNTL 0x151A
|
||||
#define mmINTERRUPT_CNTL2 0x151B
|
||||
#define mmMASTER_CREDIT_CNTL 0x1516
|
||||
#define mmMM_CFGREGS_CNTL 0x1513
|
||||
#define mmMM_DATA 0x0001
|
||||
#define mmMM_INDEX 0x0000
|
||||
#define mmMM_INDEX_HI 0x0006
|
||||
#define mmNEW_REFCLKB_TIMER 0x14EA
|
||||
#define mmNEW_REFCLKB_TIMER_1 0x14E9
|
||||
#define mmPCIE_DATA 0x000D
|
||||
#define mmPCIE_INDEX 0x000C
|
||||
#define mmPEER0_FB_OFFSET_HI 0x14F3
|
||||
#define mmPEER0_FB_OFFSET_LO 0x14F2
|
||||
#define mmPEER1_FB_OFFSET_HI 0x14F1
|
||||
#define mmPEER1_FB_OFFSET_LO 0x14F0
|
||||
#define mmPEER2_FB_OFFSET_HI 0x14EF
|
||||
#define mmPEER2_FB_OFFSET_LO 0x14EE
|
||||
#define mmPEER3_FB_OFFSET_HI 0x14ED
|
||||
#define mmPEER3_FB_OFFSET_LO 0x14EC
|
||||
#define mmPEER_REG_RANGE0 0x153E
|
||||
#define mmPEER_REG_RANGE1 0x153F
|
||||
#define mmSLAVE_HANG_ERROR 0x153B
|
||||
#define mmSLAVE_HANG_PROTECTION_CNTL 0x1536
|
||||
#define mmSLAVE_REQ_CREDIT_CNTL 0x1517
|
||||
#define mmSMBCLK_PAD_CNTL 0x1523
|
||||
#define mmSMBDAT_PAD_CNTL 0x1522
|
||||
#define mmSMBUS_BACO_DUMMY 0x14C6
|
||||
|
||||
#endif
(7 file diffs suppressed because they are too large to display)
@@ -0,0 +1,275 @@
/*
|
||||
*
|
||||
* Copyright (C) 2016 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
|
||||
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef OSS_1_0_D_H
|
||||
#define OSS_1_0_D_H
|
||||
|
||||
#define ixCLIENT0_BM 0x0220
|
||||
#define ixCLIENT0_CD0 0x0210
|
||||
#define ixCLIENT0_CD1 0x0214
|
||||
#define ixCLIENT0_CD2 0x0218
|
||||
#define ixCLIENT0_CD3 0x021C
|
||||
#define ixCLIENT0_CK0 0x0200
|
||||
#define ixCLIENT0_CK1 0x0204
|
||||
#define ixCLIENT0_CK2 0x0208
|
||||
#define ixCLIENT0_CK3 0x020C
|
||||
#define ixCLIENT0_K0 0x01F0
|
||||
#define ixCLIENT0_K1 0x01F4
|
||||
#define ixCLIENT0_K2 0x01F8
|
||||
#define ixCLIENT0_K3 0x01FC
|
||||
#define ixCLIENT0_OFFSET 0x0224
|
||||
#define ixCLIENT0_OFFSET_HI 0x0290
|
||||
#define ixCLIENT0_STATUS 0x0228
|
||||
#define ixCLIENT1_BM 0x025C
|
||||
#define ixCLIENT1_CD0 0x024C
|
||||
#define ixCLIENT1_CD1 0x0250
|
||||
#define ixCLIENT1_CD2 0x0254
|
||||
#define ixCLIENT1_CD3 0x0258
|
||||
#define ixCLIENT1_CK0 0x023C
|
||||
#define ixCLIENT1_CK1 0x0240
|
||||
#define ixCLIENT1_CK2 0x0244
|
||||
#define ixCLIENT1_CK3 0x0248
|
||||
#define ixCLIENT1_K0 0x022C
|
||||
#define ixCLIENT1_K1 0x0230
|
||||
#define ixCLIENT1_K2 0x0234
|
||||
#define ixCLIENT1_K3 0x0238
|
||||
#define ixCLIENT1_OFFSET 0x0260
|
||||
#define ixCLIENT1_OFFSET_HI 0x0294
|
||||
#define ixCLIENT1_PORT_STATUS 0x0264
|
||||
#define ixCLIENT2_BM 0x01E4
|
||||
#define ixCLIENT2_CD0 0x01D4
|
||||
#define ixCLIENT2_CD1 0x01D8
|
||||
#define ixCLIENT2_CD2 0x01DC
|
||||
#define ixCLIENT2_CD3 0x01E0
|
||||
#define ixCLIENT2_CK0 0x01C4
|
||||
#define ixCLIENT2_CK1 0x01C8
|
||||
#define ixCLIENT2_CK2 0x01CC
|
||||
#define ixCLIENT2_CK3 0x01D0
|
||||
#define ixCLIENT2_K0 0x01B4
|
||||
#define ixCLIENT2_K1 0x01B8
|
||||
#define ixCLIENT2_K2 0x01BC
|
||||
#define ixCLIENT2_K3 0x01C0
|
||||
#define ixCLIENT2_OFFSET 0x01E8
|
||||
#define ixCLIENT2_OFFSET_HI 0x0298
|
||||
#define ixCLIENT2_STATUS 0x01EC
|
||||
#define ixCLIENT3_BM 0x02D4
|
||||
#define ixCLIENT3_CD0 0x02C4
|
||||
#define ixCLIENT3_CD1 0x02C8
|
||||
#define ixCLIENT3_CD2 0x02CC
|
||||
#define ixCLIENT3_CD3 0x02D0
|
||||
#define ixCLIENT3_CK0 0x02B4
|
||||
#define ixCLIENT3_CK1 0x02B8
|
||||
#define ixCLIENT3_CK2 0x02BC
|
||||
#define ixCLIENT3_CK3 0x02C0
|
||||
#define ixCLIENT3_K0 0x02A4
|
||||
#define ixCLIENT3_K1 0x02A8
|
||||
#define ixCLIENT3_K2 0x02AC
|
||||
#define ixCLIENT3_K3 0x02B0
|
||||
#define ixCLIENT3_OFFSET 0x02D8
|
||||
#define ixCLIENT3_OFFSET_HI 0x02A0
|
||||
#define ixCLIENT3_STATUS 0x02DC
|
||||
#define ixDH_TEST 0x0000
|
||||
#define ixEXP0 0x0034
|
||||
#define ixEXP1 0x0038
|
||||
#define ixEXP2 0x003C
|
||||
#define ixEXP3 0x0040
|
||||
#define ixEXP4 0x0044
|
||||
#define ixEXP5 0x0048
|
||||
#define ixEXP6 0x004C
|
||||
#define ixEXP7 0x0050
|
||||
#define ixHFS_SEED0 0x0278
|
||||
#define ixHFS_SEED1 0x027C
|
||||
#define ixHFS_SEED2 0x0280
|
||||
#define ixHFS_SEED3 0x0284
|
||||
#define ixKEFUSE0 0x0268
|
||||
#define ixKEFUSE1 0x026C
|
||||
#define ixKEFUSE2 0x0270
|
||||
#define ixKEFUSE3 0x0274
|
||||
#define ixKHFS0 0x0004
|
||||
#define ixKHFS1 0x0008
|
||||
#define ixKHFS2 0x000C
|
||||
#define ixKHFS3 0x0010
|
||||
#define ixKSESSION0 0x0014
|
||||
#define ixKSESSION1 0x0018
|
||||
#define ixKSESSION2 0x001C
|
||||
#define ixKSESSION3 0x0020
|
||||
#define ixKSIG0 0x0024
|
||||
#define ixKSIG1 0x0028
|
||||
#define ixKSIG2 0x002C
|
||||
#define ixKSIG3 0x0030
|
||||
#define ixLX0 0x0054
|
||||
#define ixLX1 0x0058
|
||||
#define ixLX2 0x005C
|
||||
#define ixLX3 0x0060
|
||||
#define ixRINGOSC_MASK 0x0288
|
||||
#define ixSPU_PORT_STATUS 0x029C
|
||||
#define mmCC_DRM_ID_STRAPS 0x1559
|
||||
#define mmCC_SYS_RB_BACKEND_DISABLE 0x03A0
|
||||
#define mmCC_SYS_RB_REDUNDANCY 0x039F
|
||||
#define mmCGTT_DRM_CLK_CTRL0 0x1579
|
||||
#define mmCP_CONFIG 0x0F92
|
||||
#define mmDC_TEST_DEBUG_DATA 0x157D
|
||||
#define mmDC_TEST_DEBUG_INDEX 0x157C
|
||||
#define mmGC_USER_SYS_RB_BACKEND_DISABLE 0x03A1
|
||||
#define mmHDP_ADDR_CONFIG 0x0BD2
|
||||
#define mmHDP_DEBUG0 0x0BCC
|
||||
#define mmHDP_DEBUG1 0x0BCD
|
||||
#define mmHDP_HOST_PATH_CNTL 0x0B00
|
||||
#define mmHDP_LAST_SURFACE_HIT 0x0BCE
|
||||
#define mmHDP_MEMIO_ADDR 0x0BF7
|
||||
#define mmHDP_MEMIO_CNTL 0x0BF6
|
||||
#define mmHDP_MEMIO_RD_DATA 0x0BFA
|
||||
#define mmHDP_MEMIO_STATUS 0x0BF8
|
||||
#define mmHDP_MEMIO_WR_DATA 0x0BF9
|
||||
#define mmHDP_MEM_POWER_LS 0x0BD4
|
||||
#define mmHDP_MISC_CNTL 0x0BD3
|
||||
#define mmHDP_NONSURFACE_BASE 0x0B01
|
||||
#define mmHDP_NONSURFACE_INFO 0x0B02
|
||||
#define mmHDP_NONSURFACE_PREFETCH 0x0BD5
|
||||
#define mmHDP_NONSURFACE_SIZE 0x0B03
|
||||
#define mmHDP_NONSURF_FLAGS 0x0BC9
|
||||
#define mmHDP_NONSURF_FLAGS_CLR 0x0BCA
|
||||
#define mmHDP_OUTSTANDING_REQ 0x0BD1
|
||||
#define mmHDP_SC_MULTI_CHIP_CNTL 0x0BD0
|
||||
#define mmHDP_SW_SEMAPHORE 0x0BCB
|
||||
#define mmHDP_TILING_CONFIG 0x0BCF
|
||||
#define mmHDP_XDP_BARS_ADDR_39_36 0x0C44
|
||||
#define mmHDP_XDP_BUSY_STS 0x0C3E
|
||||
#define mmHDP_XDP_CGTT_BLK_CTRL 0x0C33
|
||||
#define mmHDP_XDP_CHKN 0x0C40
|
||||
#define mmHDP_XDP_D2H_BAR_UPDATE 0x0C02
|
||||
#define mmHDP_XDP_D2H_FLUSH 0x0C01
|
||||
#define mmHDP_XDP_D2H_RSVD_10 0x0C0A
|
||||
#define mmHDP_XDP_D2H_RSVD_11 0x0C0B
|
||||
#define mmHDP_XDP_D2H_RSVD_12 0x0C0C
|
||||
#define mmHDP_XDP_D2H_RSVD_13 0x0C0D
|
||||
#define mmHDP_XDP_D2H_RSVD_14 0x0C0E
|
||||
#define mmHDP_XDP_D2H_RSVD_15 0x0C0F
|
||||
#define mmHDP_XDP_D2H_RSVD_16 0x0C10
|
||||
#define mmHDP_XDP_D2H_RSVD_17 0x0C11
|
||||
#define mmHDP_XDP_D2H_RSVD_18 0x0C12
|
||||
#define mmHDP_XDP_D2H_RSVD_19 0x0C13
|
||||
#define mmHDP_XDP_D2H_RSVD_20 0x0C14
|
||||
#define mmHDP_XDP_D2H_RSVD_21 0x0C15
|
||||
#define mmHDP_XDP_D2H_RSVD_22 0x0C16
|
||||
#define mmHDP_XDP_D2H_RSVD_23 0x0C17
|
||||
#define mmHDP_XDP_D2H_RSVD_24 0x0C18
|
||||
#define mmHDP_XDP_D2H_RSVD_25 0x0C19
|
||||
#define mmHDP_XDP_D2H_RSVD_26 0x0C1A
|
||||
#define mmHDP_XDP_D2H_RSVD_27 0x0C1B
|
||||
#define mmHDP_XDP_D2H_RSVD_28 0x0C1C
|
||||
#define mmHDP_XDP_D2H_RSVD_29 0x0C1D
|
||||
#define mmHDP_XDP_D2H_RSVD_30 0x0C1E
|
||||
#define mmHDP_XDP_D2H_RSVD_3 0x0C03
|
||||
#define mmHDP_XDP_D2H_RSVD_31 0x0C1F
|
||||
#define mmHDP_XDP_D2H_RSVD_32 0x0C20
|
||||
#define mmHDP_XDP_D2H_RSVD_33 0x0C21
|
||||
#define mmHDP_XDP_D2H_RSVD_34 0x0C22
|
||||
#define mmHDP_XDP_D2H_RSVD_4 0x0C04
|
||||
#define mmHDP_XDP_D2H_RSVD_5 0x0C05
|
||||
#define mmHDP_XDP_D2H_RSVD_6 0x0C06
|
||||
#define mmHDP_XDP_D2H_RSVD_7 0x0C07
|
||||
#define mmHDP_XDP_D2H_RSVD_8 0x0C08
|
||||
#define mmHDP_XDP_D2H_RSVD_9 0x0C09
|
||||
#define mmHDP_XDP_DBG_ADDR 0x0C41
|
||||
#define mmHDP_XDP_DBG_DATA 0x0C42
|
||||
#define mmHDP_XDP_DBG_MASK 0x0C43
|
||||
#define mmHDP_XDP_DIRECT2HDP_FIRST 0x0C00
|
||||
#define mmHDP_XDP_DIRECT2HDP_LAST 0x0C23
|
||||
#define mmHDP_XDP_FLUSH_ARMED_STS 0x0C3C
|
||||
#define mmHDP_XDP_FLUSH_CNTR0_STS 0x0C3D
|
||||
#define mmHDP_XDP_HDP_IPH_CFG 0x0C31
|
||||
#define mmHDP_XDP_HDP_MBX_MC_CFG 0x0C2D
|
||||
#define mmHDP_XDP_HDP_MC_CFG 0x0C2E
|
||||
#define mmHDP_XDP_HST_CFG 0x0C2F
|
||||
#define mmHDP_XDP_P2P_BAR0 0x0C34
|
||||
#define mmHDP_XDP_P2P_BAR1 0x0C35
|
||||
#define mmHDP_XDP_P2P_BAR2 0x0C36
|
||||
#define mmHDP_XDP_P2P_BAR3 0x0C37
|
||||
#define mmHDP_XDP_P2P_BAR4 0x0C38
|
||||
#define mmHDP_XDP_P2P_BAR5 0x0C39
|
||||
#define mmHDP_XDP_P2P_BAR6 0x0C3A
|
||||
#define mmHDP_XDP_P2P_BAR7 0x0C3B
|
||||
#define mmHDP_XDP_P2P_BAR_CFG 0x0C24
|
||||
#define mmHDP_XDP_P2P_MBX_ADDR0 0x0C26
|
||||
#define mmHDP_XDP_P2P_MBX_ADDR1 0x0C27
|
||||
#define mmHDP_XDP_P2P_MBX_ADDR2 0x0C28
|
||||
#define mmHDP_XDP_P2P_MBX_ADDR3 0x0C29
|
||||
#define mmHDP_XDP_P2P_MBX_ADDR4 0x0C2A
|
||||
#define mmHDP_XDP_P2P_MBX_ADDR5 0x0C2B
|
||||
#define mmHDP_XDP_P2P_MBX_ADDR6 0x0C2C
|
||||
#define mmHDP_XDP_P2P_MBX_OFFSET 0x0C25
|
||||
#define mmHDP_XDP_SID_CFG 0x0C30
|
||||
#define mmHDP_XDP_SRBM_CFG 0x0C32
|
||||
#define mmHDP_XDP_STICKY 0x0C3F
|
||||
#define mmIH_ADVFAULT_CNTL 0x0F8C
|
||||
#define mmIH_CNTL 0x0F86
|
||||
#define mmIH_LEVEL_STATUS 0x0F87
|
||||
#define mmIH_PERFCOUNTER0_RESULT 0x0F8A
|
||||
#define mmIH_PERFCOUNTER1_RESULT 0x0F8B
|
||||
#define mmIH_PERFMON_CNTL 0x0F89
|
||||
#define mmIH_RB_BASE 0x0F81
|
||||
#define mmIH_RB_CNTL 0x0F80
|
||||
#define mmIH_RB_RPTR 0x0F82
|
||||
#define mmIH_RB_WPTR 0x0F83
|
||||
#define mmIH_RB_WPTR_ADDR_HI 0x0F84
|
||||
#define mmIH_RB_WPTR_ADDR_LO 0x0F85
|
||||
#define mmIH_STATUS 0x0F88
|
||||
#define mmSEM_MAILBOX 0x0F9B
|
||||
#define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A
|
||||
#define mmSEM_MAILBOX_CONTROL 0x0F9C
|
||||
#define mmSEM_MCIF_CONFIG 0x0F90
|
||||
#define mmSRBM_CAM_DATA 0x0397
|
||||
#define mmSRBM_CAM_INDEX 0x0396
|
||||
#define mmSRBM_CHIP_REVISION 0x039B
|
||||
#define mmSRBM_CNTL 0x0390
|
||||
#define mmSRBM_DEBUG 0x03A4
|
||||
#define mmSRBM_DEBUG_CNTL 0x0399
|
||||
#define mmSRBM_DEBUG_DATA 0x039A
|
||||
#define mmSRBM_DEBUG_SNAPSHOT 0x03A5
|
||||
#define mmSRBM_GFX_CNTL 0x0391
|
||||
#define mmSRBM_INT_ACK 0x03AA
|
||||
#define mmSRBM_INT_CNTL 0x03A8
|
||||
#define mmSRBM_INT_STATUS 0x03A9
|
||||
#define mmSRBM_MC_CLKEN_CNTL 0x03B3
|
||||
#define mmSRBM_PERFCOUNTER0_HI 0x0704
|
||||
#define mmSRBM_PERFCOUNTER0_LO 0x0703
|
||||
#define mmSRBM_PERFCOUNTER0_SELECT 0x0701
|
||||
#define mmSRBM_PERFCOUNTER1_HI 0x0706
|
||||
#define mmSRBM_PERFCOUNTER1_LO 0x0705
|
||||
#define mmSRBM_PERFCOUNTER1_SELECT 0x0702
|
||||
#define mmSRBM_PERFMON_CNTL 0x0700
|
||||
#define mmSRBM_READ_ERROR 0x03A6
|
||||
#define mmSRBM_SOFT_RESET 0x0398
|
||||
#define mmSRBM_STATUS 0x0394
|
||||
#define mmSRBM_STATUS2 0x0393
|
||||
#define mmSRBM_SYS_CLKEN_CNTL 0x03B4
|
||||
#define mmSRBM_UVD_CLKEN_CNTL 0x03B6
|
||||
#define mmSRBM_VCE_CLKEN_CNTL 0x03B5
|
||||
#define mmUVD_CONFIG 0x0F98
|
||||
#define mmVCE_CONFIG 0x0F94
|
||||
#define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8
|
||||
|
||||
/* from the old sid.h */
|
||||
#define mmDMA_TILING_CONFIG 0x342E
|
||||
|
||||
#endif
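
The mm* constants above are dword offsets into the memory-mapped register aperture, while the ix* constants are offsets into indexed (indirect) register spaces. A minimal sketch of how such an offset is typically consumed; "mmio" is a hypothetical pointer to an already-mapped register BAR, not the driver's real RREG32()/WREG32() accessors:

/* Illustrative only, not part of the diff: raw pointer arithmetic stands in
 * for the driver's own register helpers. */
#include <stdint.h>

static inline uint32_t reg_read32(volatile uint32_t *mmio, uint32_t dw_offset)
{
        /* mm* offsets are in dwords, so they index the mapping directly */
        return mmio[dw_offset];
}

static inline void reg_write32(volatile uint32_t *mmio, uint32_t dw_offset,
                               uint32_t val)
{
        mmio[dw_offset] = val;
}

/* e.g.:  uint32_t srbm = reg_read32(mmio, mmSRBM_STATUS);  */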
(file diff suppressed because it is too large to display)
@@ -0,0 +1,148 @@
/*
|
||||
*
|
||||
* Copyright (C) 2016 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
|
||||
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef SMU_6_0_D_H
|
||||
#define SMU_6_0_D_H
|
||||
|
||||
#define ixLCAC_MC0_CNTL 0x011C
|
||||
#define ixLCAC_MC0_OVR_SEL 0x011D
|
||||
#define ixLCAC_MC0_OVR_VAL 0x011E
|
||||
#define ixLCAC_MC1_CNTL 0x011F
|
||||
#define ixLCAC_MC1_OVR_SEL 0x0120
|
||||
#define ixLCAC_MC1_OVR_VAL 0x0121
|
||||
#define ixLCAC_MC2_CNTL 0x0122
|
||||
#define ixLCAC_MC2_OVR_SEL 0x0123
|
||||
#define ixLCAC_MC2_OVR_VAL 0x0124
|
||||
#define ixLCAC_MC3_CNTL 0x0125
|
||||
#define ixLCAC_MC3_OVR_SEL 0x0126
|
||||
#define ixLCAC_MC3_OVR_VAL 0x0127
|
||||
#define ixLCAC_MC4_CNTL 0x0128
|
||||
#define ixLCAC_MC4_OVR_SEL 0x0129
|
||||
#define ixLCAC_MC4_OVR_VAL 0x012A
|
||||
#define ixLCAC_MC5_CNTL 0x012B
|
||||
#define ixLCAC_MC5_OVR_SEL 0x012C
|
||||
#define ixLCAC_MC5_OVR_VAL 0x012D
|
||||
#define ixSMC_PC_C 0x80000370
|
||||
#define ixTHM_TMON0_DEBUG 0x03F0
|
||||
#define ixTHM_TMON0_INT_DATA 0x0380
|
||||
#define ixTHM_TMON0_RDIL0_DATA 0x0300
|
||||
#define ixTHM_TMON0_RDIL10_DATA 0x030A
|
||||
#define ixTHM_TMON0_RDIL11_DATA 0x030B
|
||||
#define ixTHM_TMON0_RDIL12_DATA 0x030C
|
||||
#define ixTHM_TMON0_RDIL13_DATA 0x030D
|
||||
#define ixTHM_TMON0_RDIL14_DATA 0x030E
|
||||
#define ixTHM_TMON0_RDIL15_DATA 0x030F
|
||||
#define ixTHM_TMON0_RDIL1_DATA 0x0301
|
||||
#define ixTHM_TMON0_RDIL2_DATA 0x0302
|
||||
#define ixTHM_TMON0_RDIL3_DATA 0x0303
|
||||
#define ixTHM_TMON0_RDIL4_DATA 0x0304
|
||||
#define ixTHM_TMON0_RDIL5_DATA 0x0305
|
||||
#define ixTHM_TMON0_RDIL6_DATA 0x0306
|
||||
#define ixTHM_TMON0_RDIL7_DATA 0x0307
|
||||
#define ixTHM_TMON0_RDIL8_DATA 0x0308
|
||||
#define ixTHM_TMON0_RDIL9_DATA 0x0309
|
||||
#define ixTHM_TMON0_RDIR0_DATA 0x0310
|
||||
#define ixTHM_TMON0_RDIR10_DATA 0x031A
|
||||
#define ixTHM_TMON0_RDIR11_DATA 0x031B
|
||||
#define ixTHM_TMON0_RDIR12_DATA 0x031C
|
||||
#define ixTHM_TMON0_RDIR13_DATA 0x031D
|
||||
#define ixTHM_TMON0_RDIR14_DATA 0x031E
|
||||
#define ixTHM_TMON0_RDIR15_DATA 0x031F
|
||||
#define ixTHM_TMON0_RDIR1_DATA 0x0311
|
||||
#define ixTHM_TMON0_RDIR2_DATA 0x0312
|
||||
#define ixTHM_TMON0_RDIR3_DATA 0x0313
|
||||
#define ixTHM_TMON0_RDIR4_DATA 0x0314
|
||||
#define ixTHM_TMON0_RDIR5_DATA 0x0315
|
||||
#define ixTHM_TMON0_RDIR6_DATA 0x0316
|
||||
#define ixTHM_TMON0_RDIR7_DATA 0x0317
|
||||
#define ixTHM_TMON0_RDIR8_DATA 0x0318
|
||||
#define ixTHM_TMON0_RDIR9_DATA 0x0319
|
||||
#define ixTHM_TMON1_DEBUG 0x03F1
|
||||
#define ixTHM_TMON1_INT_DATA 0x0381
|
||||
#define ixTHM_TMON1_RDIL0_DATA 0x0320
|
||||
#define ixTHM_TMON1_RDIL10_DATA 0x032A
|
||||
#define ixTHM_TMON1_RDIL11_DATA 0x032B
|
||||
#define ixTHM_TMON1_RDIL12_DATA 0x032C
|
||||
#define ixTHM_TMON1_RDIL13_DATA 0x032D
|
||||
#define ixTHM_TMON1_RDIL14_DATA 0x032E
|
||||
#define ixTHM_TMON1_RDIL15_DATA 0x032F
|
||||
#define ixTHM_TMON1_RDIL1_DATA 0x0321
|
||||
#define ixTHM_TMON1_RDIL2_DATA 0x0322
|
||||
#define ixTHM_TMON1_RDIL3_DATA 0x0323
|
||||
#define ixTHM_TMON1_RDIL4_DATA 0x0324
|
||||
#define ixTHM_TMON1_RDIL5_DATA 0x0325
|
||||
#define ixTHM_TMON1_RDIL6_DATA 0x0326
|
||||
#define ixTHM_TMON1_RDIL7_DATA 0x0327
|
||||
#define ixTHM_TMON1_RDIL8_DATA 0x0328
|
||||
#define ixTHM_TMON1_RDIL9_DATA 0x0329
|
||||
#define ixTHM_TMON1_RDIR0_DATA 0x0330
|
||||
#define ixTHM_TMON1_RDIR10_DATA 0x033A
|
||||
#define ixTHM_TMON1_RDIR11_DATA 0x033B
|
||||
#define ixTHM_TMON1_RDIR12_DATA 0x033C
|
||||
#define ixTHM_TMON1_RDIR13_DATA 0x033D
|
||||
#define ixTHM_TMON1_RDIR14_DATA 0x033E
|
||||
#define ixTHM_TMON1_RDIR15_DATA 0x033F
|
||||
#define ixTHM_TMON1_RDIR1_DATA 0x0331
|
||||
#define ixTHM_TMON1_RDIR2_DATA 0x0332
|
||||
#define ixTHM_TMON1_RDIR3_DATA 0x0333
|
||||
#define ixTHM_TMON1_RDIR4_DATA 0x0334
|
||||
#define ixTHM_TMON1_RDIR5_DATA 0x0335
|
||||
#define ixTHM_TMON1_RDIR6_DATA 0x0336
|
||||
#define ixTHM_TMON1_RDIR7_DATA 0x0337
|
||||
#define ixTHM_TMON1_RDIR8_DATA 0x0338
|
||||
#define ixTHM_TMON1_RDIR9_DATA 0x0339
|
||||
#define mmGPIOPAD_A 0x05E7
|
||||
#define mmGPIOPAD_EN 0x05E8
|
||||
#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1
|
||||
#define mmGPIOPAD_INT_EN 0x05EE
|
||||
#define mmGPIOPAD_INT_POLARITY 0x05F0
|
||||
#define mmGPIOPAD_INT_STAT 0x05EC
|
||||
#define mmGPIOPAD_INT_STAT_AK 0x05ED
|
||||
#define mmGPIOPAD_INT_STAT_EN 0x05EB
|
||||
#define mmGPIOPAD_INT_TYPE 0x05EF
|
||||
#define mmGPIOPAD_MASK 0x05E6
|
||||
#define mmGPIOPAD_PD_EN 0x05F4
|
||||
#define mmGPIOPAD_PINSTRAPS 0x05EA
|
||||
#define mmGPIOPAD_PU_EN 0x05F3
|
||||
#define mmGPIOPAD_RCVR_SEL 0x05F2
|
||||
#define mmGPIOPAD_STRENGTH 0x05E5
|
||||
#define mmGPIOPAD_SW_INT_STAT 0x05E4
|
||||
#define mmGPIOPAD_Y 0x05E9
|
||||
#define mmSMC_IND_ACCESS_CNTL 0x008A
|
||||
#define mmSMC_IND_DATA_0 0x0081
|
||||
#define mmSMC_IND_DATA 0x0081
|
||||
#define mmSMC_IND_DATA_1 0x0083
|
||||
#define mmSMC_IND_DATA_2 0x0085
|
||||
#define mmSMC_IND_DATA_3 0x0087
|
||||
#define mmSMC_IND_INDEX_0 0x0080
|
||||
#define mmSMC_IND_INDEX 0x0080
|
||||
#define mmSMC_IND_INDEX_1 0x0082
|
||||
#define mmSMC_IND_INDEX_2 0x0084
|
||||
#define mmSMC_IND_INDEX_3 0x0086
|
||||
#define mmSMC_MESSAGE_0 0x008B
|
||||
#define mmSMC_MESSAGE_1 0x008D
|
||||
#define mmSMC_MESSAGE_2 0x008F
|
||||
#define mmSMC_RESP_0 0x008C
|
||||
#define mmSMC_RESP_1 0x008E
|
||||
#define mmSMC_RESP_2 0x0090
|
||||
|
||||
#endif
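
The ix* SMC registers above are not directly memory mapped; they are conventionally reached through the SMC_IND_INDEX / SMC_IND_DATA register pair, and their fields are decoded with the __MASK/__SHIFT pairs from the companion _sh_mask header that follows. A sketch under the same assumptions as the earlier reg_read32()/reg_write32() example:

/* Illustrative only: select the indirect register, then read its value back. */
static inline uint32_t smc_ind_read32(volatile uint32_t *mmio, uint32_t ix_offset)
{
        reg_write32(mmio, mmSMC_IND_INDEX_0, ix_offset);
        return reg_read32(mmio, mmSMC_IND_DATA_0);
}

/* e.g. the MC0 threshold field of ixLCAC_MC0_CNTL:
 *   uint32_t v   = smc_ind_read32(mmio, ixLCAC_MC0_CNTL);
 *   uint32_t thr = (v & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) >>
 *                  LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT;
 */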
@@ -0,0 +1,715 @@
/*
|
||||
*
|
||||
* Copyright (C) 2016 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
|
||||
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef SMU_6_0_SH_MASK_H
|
||||
#define SMU_6_0_SH_MASK_H
|
||||
|
||||
#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL
|
||||
#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
|
||||
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L
|
||||
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004
|
||||
#define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL
|
||||
#define GPIOPAD_A__GPIO_A__SHIFT 0x00000000
|
||||
#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL
|
||||
#define GPIOPAD_EN__GPIO_EN__SHIFT 0x00000000
|
||||
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x00000020L
|
||||
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x00000005
|
||||
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x00000040L
|
||||
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x00000006
|
||||
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x0000001fL
|
||||
#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x00000000
|
||||
#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffffL
|
||||
#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x00000000
|
||||
#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L
|
||||
#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x0000001f
|
||||
#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffffL
|
||||
#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x00000000
|
||||
#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L
|
||||
#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x0000001f
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x00000000
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0x0000000a
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0x0000000b
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0x0000000c
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0x0000000d
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0x0000000e
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0x0000000f
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x00000010
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x00000011
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x00000012
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x00000013
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x00000001
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x00000014
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x00000015
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x00000016
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x00000017
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x00000018
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x00000019
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x0000001a
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x0000001b
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x0000001c
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x00000002
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x00000003
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x00000004
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x00000005
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x00000006
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x00000007
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x00000008
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L
|
||||
#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x00000009
|
||||
#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L
|
||||
#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x0000001f
|
||||
#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffffL
|
||||
#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x00000000
|
||||
#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L
|
||||
#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x0000001f
|
||||
#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffffL
|
||||
#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x00000000
|
||||
#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L
|
||||
#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x0000001f
|
||||
#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffffL
|
||||
#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x00000000
|
||||
#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L
|
||||
#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x0000001f
|
||||
#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffffL
|
||||
#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x00000000
|
||||
#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffffL
|
||||
#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x00000000
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x00000000
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0x0000000a
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0x0000000b
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0x0000000c
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0x0000000d
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0x0000000e
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0x0000000f
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x00000010
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x00000011
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x00000012
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x00000013
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x00000001
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x00000014
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x00000015
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x00000016
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x00000017
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x00000018
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x00000019
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x0000001a
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x0000001b
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x0000001c
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x0000001d
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x00000002
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x0000001e
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x00000003
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x00000004
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x00000005
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x00000006
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x00000007
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x00000008
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L
|
||||
#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x00000009
|
||||
#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffffL
|
||||
#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x00000000
|
||||
#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffffL
|
||||
#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x00000000
|
||||
#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0x0000000fL
|
||||
#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x00000000
|
||||
#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0x000000f0L
|
||||
#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x00000004
|
||||
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L
|
||||
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000
|
||||
#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL
|
||||
#define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000
|
||||
#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L
|
||||
#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000
|
||||
#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL
|
||||
#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x00000001
|
||||
#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffffL
|
||||
#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x00000000
|
||||
#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffffL
|
||||
#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x00000000
|
||||
#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x00000001L
|
||||
#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x00000000
|
||||
#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x0001fffeL
|
||||
#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x00000001
|
||||
#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffffL
|
||||
#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x00000000
|
||||
#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffffL
|
||||
#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x00000000
|
||||
#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x00000001L
|
||||
#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x00000000
|
||||
#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x0001fffeL
|
||||
#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x00000001
|
||||
#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffffL
|
||||
#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x00000000
|
||||
#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffffL
|
||||
#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x00000000
|
||||
#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x00000001L
|
||||
#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x00000000
|
||||
#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x0001fffeL
|
||||
#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x00000001
|
||||
#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffffL
|
||||
#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x00000000
|
||||
#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffffL
|
||||
#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x00000000
|
||||
#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x00000001L
|
||||
#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x00000000
|
||||
#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x0001fffeL
|
||||
#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x00000001
|
||||
#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffffL
|
||||
#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x00000000
|
||||
#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffffL
|
||||
#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x00000000
|
||||
#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x00000001L
|
||||
#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x00000000
|
||||
#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x0001fffeL
|
||||
#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x00000001
|
||||
#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffffL
|
||||
#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000
|
||||
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL
|
||||
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000
|
||||
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L
|
||||
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000
|
||||
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L
|
||||
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x00000008
|
||||
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x00010000L
|
||||
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x00000010
|
||||
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x01000000L
|
||||
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x00000018
|
||||
#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffffL
|
||||
#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x00000000
|
||||
#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffffL
|
||||
#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x00000000
|
||||
#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffffL
|
||||
#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x00000000
|
||||
#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffffL
|
||||
#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x00000000
|
||||
#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffffL
|
||||
#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x00000000
|
||||
#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffffL
|
||||
#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x00000000
|
||||
#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffffL
|
||||
#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x00000000
|
||||
#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffffL
|
||||
#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x00000000
|
||||
#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffffL
|
||||
#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x00000000
|
||||
#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffffL
|
||||
#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x00000000
|
||||
#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffffffffL
|
||||
#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x00000000
|
||||
#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffffffffL
|
||||
#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x00000000
|
||||
#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffffffffL
|
||||
#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x00000000
|
||||
#define SMC_PC_C__smc_pc_c_MASK 0xffffffffL
|
||||
#define SMC_PC_C__smc_pc_c__SHIFT 0x00000000
|
||||
#define SMC_RESP_0__SMC_RESP_MASK 0xffffffffL
|
||||
#define SMC_RESP_0__SMC_RESP__SHIFT 0x00000000
|
||||
#define SMC_RESP_1__SMC_RESP_MASK 0xffffffffL
|
||||
#define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000
|
||||
#define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL
|
||||
#define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000
|
||||
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L
|
||||
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c
|
||||
#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L
|
||||
#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x00000004
|
||||
#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x00000008L
|
||||
#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003
|
||||
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L
|
||||
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001
|
||||
#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L
|
||||
#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c
|
||||
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L
|
||||
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x00000000
|
||||
#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0x00000c00L
|
||||
#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0x0000000a
|
||||
#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x00000004L
|
||||
#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002
|
||||
#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L
|
||||
#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d
|
||||
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L
|
||||
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018
|
||||
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L
|
||||
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c
|
||||
#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL
|
||||
#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000
|
||||
#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
|
||||
#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x00000005
|
||||
#define THM_TMON0_INT_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_INT_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_INT_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_INT_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_INT_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_INT_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL0_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL10_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL11_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL12_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL13_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL14_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL15_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL1_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL2_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL3_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL4_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL5_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL6_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL7_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL8_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIL9_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR0_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR10_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR11_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR12_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR13_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR14_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR15_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR1_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR2_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR3_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR4_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR5_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0x00fff000L
|
||||
#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0x0000000c
|
||||
#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x00000800L
|
||||
#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0x0000000b
|
||||
#define THM_TMON0_RDIR6_DATA__Z_MASK 0x000007ffL
|
||||
#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x00000000
|
||||
#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x00000800L
#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON0_RDIR7_DATA__Z_MASK 0x000007ffL
#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x00000000
#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x00000800L
#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON0_RDIR8_DATA__Z_MASK 0x000007ffL
#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x00000000
#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x00000800L
#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON0_RDIR9_DATA__Z_MASK 0x000007ffL
#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x0000001fL
#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x00000000
#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x00000005
#define THM_TMON1_INT_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_INT_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_INT_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_INT_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_INT_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_INT_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL0_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL10_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL11_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL12_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL13_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL14_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL15_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL1_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL2_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL3_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL4_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL5_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL6_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL7_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL8_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIL9_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR0_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR10_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR11_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR12_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR13_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR14_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR15_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR1_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR2_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR3_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR4_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR5_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR6_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR7_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR8_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x00000000
#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0x00fff000L
#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0x0000000c
#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x00000800L
#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0x0000000b
#define THM_TMON1_RDIR9_DATA__Z_MASK 0x000007ffL
#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x00000000

#endif
@@ -0,0 +1,96 @@
/*
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef UVD_4_0_D_H
#define UVD_4_0_D_H

#define ixUVD_CGC_CTRL2 0x00C1
#define ixUVD_CGC_MEM_CTRL 0x00C0
#define ixUVD_LMI_ADDR_EXT2 0x00AB
#define ixUVD_LMI_CACHE_CTRL 0x009B
#define ixUVD_LMI_SWAP_CNTL2 0x00AA
#define ixUVD_MIF_CURR_ADDR_CONFIG 0x0048
#define ixUVD_MIF_RECON1_ADDR_CONFIG 0x0114
#define ixUVD_MIF_REF_ADDR_CONFIG 0x004C
#define mmUVD_CGC_CTRL 0x3D2C
#define mmUVD_CGC_GATE 0x3D2A
#define mmUVD_CGC_STATUS 0x3D2B
#define mmUVD_CGC_UDEC_STATUS 0x3D2D
#define mmUVD_CONTEXT_ID 0x3DBD
#define mmUVD_CTX_DATA 0x3D29
#define mmUVD_CTX_INDEX 0x3D28
#define mmUVD_ENGINE_CNTL 0x3BC6
#define mmUVD_GPCOM_VCPU_CMD 0x3BC3
#define mmUVD_GPCOM_VCPU_DATA0 0x3BC4
#define mmUVD_GPCOM_VCPU_DATA1 0x3BC5
#define mmUVD_GP_SCRATCH4 0x3D38
#define mmUVD_LMI_ADDR_EXT 0x3D65
#define mmUVD_LMI_CTRL 0x3D66
#define mmUVD_LMI_CTRL2 0x3D3D
#define mmUVD_LMI_EXT40_ADDR 0x3D26
#define mmUVD_LMI_STATUS 0x3D67
#define mmUVD_LMI_SWAP_CNTL 0x3D6D
#define mmUVD_MASTINT_EN 0x3D40
#define mmUVD_MPC_CNTL 0x3D77
#define mmUVD_MPC_SET_ALU 0x3D7E
#define mmUVD_MPC_SET_MUX 0x3D7D
#define mmUVD_MPC_SET_MUXA0 0x3D79
#define mmUVD_MPC_SET_MUXA1 0x3D7A
#define mmUVD_MPC_SET_MUXB0 0x3D7B
#define mmUVD_MPC_SET_MUXB1 0x3D7C
#define mmUVD_MP_SWAP_CNTL 0x3D6F
#define mmUVD_NO_OP 0x3BFF
#define mmUVD_PGFSM_CONFIG 0x38F8
#define mmUVD_PGFSM_READ_TILE1 0x38FA
#define mmUVD_PGFSM_READ_TILE2 0x38FB
#define mmUVD_POWER_STATUS 0x38FC
#define mmUVD_RBC_IB_BASE 0x3DA1
#define mmUVD_RBC_IB_SIZE 0x3DA2
#define mmUVD_RBC_IB_SIZE_UPDATE 0x3DF1
#define mmUVD_RBC_RB_BASE 0x3DA3
#define mmUVD_RBC_RB_CNTL 0x3DA9
#define mmUVD_RBC_RB_RPTR 0x3DA4
#define mmUVD_RBC_RB_RPTR_ADDR 0x3DAA
#define mmUVD_RBC_RB_WPTR 0x3DA5
#define mmUVD_RBC_RB_WPTR_CNTL 0x3DA6
#define mmUVD_SEMA_ADDR_HIGH 0x3BC1
#define mmUVD_SEMA_ADDR_LOW 0x3BC0
#define mmUVD_SEMA_CMD 0x3BC2
#define mmUVD_SEMA_CNTL 0x3D00
#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0x3DB3
#define mmUVD_SEMA_TIMEOUT_STATUS 0x3DB0
#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0x3DB2
#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0x3DB1
#define mmUVD_SOFT_RESET 0x3DA0
#define mmUVD_STATUS 0x3DAF
#define mmUVD_UDEC_ADDR_CONFIG 0x3BD3
#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3BD4
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3BD5
#define mmUVD_VCPU_CACHE_OFFSET0 0x3D36
#define mmUVD_VCPU_CACHE_OFFSET1 0x3D38
#define mmUVD_VCPU_CACHE_OFFSET2 0x3D3A
#define mmUVD_VCPU_CACHE_SIZE0 0x3D37
#define mmUVD_VCPU_CACHE_SIZE1 0x3D39
#define mmUVD_VCPU_CACHE_SIZE2 0x3D3B
#define mmUVD_VCPU_CNTL 0x3D98

#endif
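The header above supplies the UVD register offsets and the one below supplies the matching per-field masks and shifts; a field is read by masking the 32-bit register value with its *_MASK define and shifting it right by its *__SHIFT define. The snippet that follows is only an illustrative sketch of that idiom and is not part of the patch: read_reg32() is a hypothetical MMIO read helper, while mmUVD_STATUS and the UVD_STATUS__VCPU_REPORT_* names come from the headers added here.

#include <stdint.h>

/* Hypothetical MMIO accessor standing in for the driver's register-read helper. */
extern uint32_t read_reg32(uint32_t offset);

/* Extract one field from a register value using its *_MASK/*__SHIFT pair. */
static inline uint32_t uvd_field_get(uint32_t val, uint32_t mask, uint32_t shift)
{
	return (val & mask) >> shift;
}

/* Example: read the VCPU_REPORT field out of the UVD_STATUS register. */
static inline uint32_t uvd_vcpu_report(void)
{
	uint32_t status = read_reg32(mmUVD_STATUS);

	return uvd_field_get(status, UVD_STATUS__VCPU_REPORT_MASK,
			     UVD_STATUS__VCPU_REPORT__SHIFT);
}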
@@ -0,0 +1,795 @@
/*
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef UVD_4_0_SH_MASK_H
#define UVD_4_0_SH_MASK_H

#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK 0x00000001L
|
||||
#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN__SHIFT 0x00000000
|
||||
#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK 0x00000002L
|
||||
#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN__SHIFT 0x00000001
|
||||
#define UVD_CGC_CTRL2__GATER_DIV_ID_MASK 0x0000001cL
|
||||
#define UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT 0x00000002
|
||||
#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003cL
|
||||
#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x00000002
|
||||
#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007c0L
|
||||
#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x00000006
|
||||
#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
|
||||
#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x00000000
|
||||
#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L
|
||||
#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x00000017
|
||||
#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L
|
||||
#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x0000001a
|
||||
#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L
|
||||
#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x00000015
|
||||
#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L
|
||||
#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x00000016
|
||||
#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L
|
||||
#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x0000001b
|
||||
#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L
|
||||
#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x00000019
|
||||
#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L
|
||||
#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x00000012
|
||||
#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L
|
||||
#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x00000018
|
||||
#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L
|
||||
#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x00000014
|
||||
#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L
|
||||
#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x00000013
|
||||
#define UVD_CGC_CTRL__SCPU_MODE_MASK 0x40000000L
|
||||
#define UVD_CGC_CTRL__SCPU_MODE__SHIFT 0x0000001e
|
||||
#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L
|
||||
#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x00000010
|
||||
#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L
|
||||
#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0x0000000c
|
||||
#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L
|
||||
#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0x0000000e
|
||||
#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L
|
||||
#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0x0000000d
|
||||
#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L
|
||||
#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x00000011
|
||||
#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L
|
||||
#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0x0000000f
|
||||
#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L
|
||||
#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0x0000000b
|
||||
#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L
|
||||
#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x0000001d
|
||||
#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L
|
||||
#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x0000001c
|
||||
#define UVD_CGC_GATE__IDCT_MASK 0x00000080L
|
||||
#define UVD_CGC_GATE__IDCT__SHIFT 0x00000007
|
||||
#define UVD_CGC_GATE__LBSI_MASK 0x00000400L
|
||||
#define UVD_CGC_GATE__LBSI__SHIFT 0x0000000a
|
||||
#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L
|
||||
#define UVD_CGC_GATE__LMI_MC__SHIFT 0x00000005
|
||||
#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L
|
||||
#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x00000006
|
||||
#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L
|
||||
#define UVD_CGC_GATE__LRBBM__SHIFT 0x0000000b
|
||||
#define UVD_CGC_GATE__MPC_MASK 0x00000200L
|
||||
#define UVD_CGC_GATE__MPC__SHIFT 0x00000009
|
||||
#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L
|
||||
#define UVD_CGC_GATE__MPEG2__SHIFT 0x00000002
|
||||
#define UVD_CGC_GATE__MPRD_MASK 0x00000100L
|
||||
#define UVD_CGC_GATE__MPRD__SHIFT 0x00000008
|
||||
#define UVD_CGC_GATE__RBC_MASK 0x00000010L
|
||||
#define UVD_CGC_GATE__RBC__SHIFT 0x00000004
|
||||
#define UVD_CGC_GATE__REGS_MASK 0x00000008L
|
||||
#define UVD_CGC_GATE__REGS__SHIFT 0x00000003
|
||||
#define UVD_CGC_GATE__SCPU_MASK 0x00080000L
|
||||
#define UVD_CGC_GATE__SCPU__SHIFT 0x00000013
|
||||
#define UVD_CGC_GATE__SYS_MASK 0x00000001L
|
||||
#define UVD_CGC_GATE__SYS__SHIFT 0x00000000
|
||||
#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L
|
||||
#define UVD_CGC_GATE__UDEC_CM__SHIFT 0x0000000d
|
||||
#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L
|
||||
#define UVD_CGC_GATE__UDEC_DB__SHIFT 0x0000000f
|
||||
#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L
|
||||
#define UVD_CGC_GATE__UDEC_IT__SHIFT 0x0000000e
|
||||
#define UVD_CGC_GATE__UDEC_MASK 0x00000002L
|
||||
#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L
|
||||
#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x00000010
|
||||
#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L
|
||||
#define UVD_CGC_GATE__UDEC_RE__SHIFT 0x0000000c
|
||||
#define UVD_CGC_GATE__UDEC__SHIFT 0x00000001
|
||||
#define UVD_CGC_GATE__VCPU_MASK 0x00040000L
|
||||
#define UVD_CGC_GATE__VCPU__SHIFT 0x00000012
|
||||
#define UVD_CGC_GATE__WCB_MASK 0x00020000L
|
||||
#define UVD_CGC_GATE__WCB__SHIFT 0x00000011
|
||||
#define UVD_CGC_MEM_CTRL__LCM_LS_EN_MASK 0x00002000L
|
||||
#define UVD_CGC_MEM_CTRL__LCM_LS_EN__SHIFT 0x0000000d
|
||||
#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK 0x00000001L
|
||||
#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN__SHIFT 0x00000000
|
||||
#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00f00000L
|
||||
#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x00000014
|
||||
#define UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000f0000L
|
||||
#define UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x00000010
|
||||
#define UVD_CGC_MEM_CTRL__MIF_LS_EN_MASK 0x00001000L
|
||||
#define UVD_CGC_MEM_CTRL__MIF_LS_EN__SHIFT 0x0000000c
|
||||
#define UVD_CGC_MEM_CTRL__MPC_LS_EN_MASK 0x00000002L
|
||||
#define UVD_CGC_MEM_CTRL__MPC_LS_EN__SHIFT 0x00000001
|
||||
#define UVD_CGC_MEM_CTRL__MPRD_LS_EN_MASK 0x00000004L
|
||||
#define UVD_CGC_MEM_CTRL__MPRD_LS_EN__SHIFT 0x00000002
|
||||
#define UVD_CGC_MEM_CTRL__SCPU_LS_EN_MASK 0x00000800L
|
||||
#define UVD_CGC_MEM_CTRL__SCPU_LS_EN__SHIFT 0x0000000b
|
||||
#define UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK 0x00000200L
|
||||
#define UVD_CGC_MEM_CTRL__SYS_LS_EN__SHIFT 0x00000009
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN_MASK 0x00000020L
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN__SHIFT 0x00000005
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN_MASK 0x00000080L
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN__SHIFT 0x00000007
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN_MASK 0x00000040L
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN__SHIFT 0x00000006
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN_MASK 0x00000100L
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN__SHIFT 0x00000008
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN_MASK 0x00000010L
|
||||
#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN__SHIFT 0x00000004
|
||||
#define UVD_CGC_MEM_CTRL__VCPU_LS_EN_MASK 0x00000400L
|
||||
#define UVD_CGC_MEM_CTRL__VCPU_LS_EN__SHIFT 0x0000000a
|
||||
#define UVD_CGC_MEM_CTRL__WCB_LS_EN_MASK 0x00000008L
|
||||
#define UVD_CGC_MEM_CTRL__WCB_LS_EN__SHIFT 0x00000003
|
||||
#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L
|
||||
#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0x0000000e
|
||||
#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L
|
||||
#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0x0000000f
|
||||
#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L
|
||||
#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x00000015
|
||||
#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L
|
||||
#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x00000016
|
||||
#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L
|
||||
#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0x0000000c
|
||||
#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L
|
||||
#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0x0000000d
|
||||
#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L
|
||||
#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x00000017
|
||||
#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L
|
||||
#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x00000014
|
||||
#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L
|
||||
#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x00000013
|
||||
#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L
|
||||
#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x00000007
|
||||
#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L
|
||||
#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x00000006
|
||||
#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L
|
||||
#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x00000008
|
||||
#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L
|
||||
#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x00000011
|
||||
#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L
|
||||
#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x00000010
|
||||
#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L
|
||||
#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x00000012
|
||||
#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L
|
||||
#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0x0000000b
|
||||
#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L
|
||||
#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x00000009
|
||||
#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L
|
||||
#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0x0000000a
|
||||
#define UVD_CGC_STATUS__SCPU_SCLK_MASK 0x08000000L
|
||||
#define UVD_CGC_STATUS__SCPU_SCLK__SHIFT 0x0000001b
|
||||
#define UVD_CGC_STATUS__SCPU_VCLK_MASK 0x10000000L
|
||||
#define UVD_CGC_STATUS__SCPU_VCLK__SHIFT 0x0000001c
|
||||
#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L
|
||||
#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x00000001
|
||||
#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L
|
||||
#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x00000000
|
||||
#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L
|
||||
#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x00000002
|
||||
#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L
|
||||
#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x00000004
|
||||
#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L
|
||||
#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x00000003
|
||||
#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L
|
||||
#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x00000005
|
||||
#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L
|
||||
#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x00000019
|
||||
#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L
|
||||
#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x0000001a
|
||||
#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L
|
||||
#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x00000018
|
||||
#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L
|
||||
#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x00000004
|
||||
#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L
|
||||
#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x00000003
|
||||
#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L
|
||||
#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x00000005
|
||||
#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L
|
||||
#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0x0000000a
|
||||
#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L
|
||||
#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x00000009
|
||||
#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L
|
||||
#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0x0000000b
|
||||
#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L
|
||||
#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x00000007
|
||||
#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L
|
||||
#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x00000006
|
||||
#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L
|
||||
#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x00000008
|
||||
#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L
|
||||
#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0x0000000d
|
||||
#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L
|
||||
#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0x0000000c
|
||||
#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L
|
||||
#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0x0000000e
|
||||
#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L
|
||||
#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x00000001
|
||||
#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L
|
||||
#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x00000000
|
||||
#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L
|
||||
#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x00000002
|
||||
#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xffffffffL
|
||||
#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x00000000
|
||||
#define UVD_CTX_DATA__DATA_MASK 0xffffffffL
|
||||
#define UVD_CTX_DATA__DATA__SHIFT 0x00000000
|
||||
#define UVD_CTX_INDEX__INDEX_MASK 0x000001ffL
|
||||
#define UVD_CTX_INDEX__INDEX__SHIFT 0x00000000
|
||||
#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x00000001L
|
||||
#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x00000002L
|
||||
#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x00000001
|
||||
#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x00000000
|
||||
#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7ffffffeL
|
||||
#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L
|
||||
#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x00000000
|
||||
#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x00000001
|
||||
#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L
|
||||
#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x0000001f
|
||||
#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xffffffffL
|
||||
#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x00000000
|
||||
#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xffffffffL
|
||||
#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x00000000
|
||||
#define UVD_LMI_ADDR_EXT2__SCPU_ADDR_EXT_MASK 0x0000000fL
|
||||
#define UVD_LMI_ADDR_EXT2__SCPU_ADDR_EXT__SHIFT 0x00000000
|
||||
#define UVD_LMI_ADDR_EXT2__SCPU_NC0_ADDR_EXT_MASK 0x00000f00L
|
||||
#define UVD_LMI_ADDR_EXT2__SCPU_NC0_ADDR_EXT__SHIFT 0x00000008
|
||||
#define UVD_LMI_ADDR_EXT2__SCPU_NC1_ADDR_EXT_MASK 0x0000f000L
|
||||
#define UVD_LMI_ADDR_EXT2__SCPU_NC1_ADDR_EXT__SHIFT 0x0000000c
|
||||
#define UVD_LMI_ADDR_EXT2__SCPU_VM_ADDR_EXT_MASK 0x000000f0L
|
||||
#define UVD_LMI_ADDR_EXT2__SCPU_VM_ADDR_EXT__SHIFT 0x00000004
|
||||
#define UVD_LMI_ADDR_EXT__CM_ADDR_EXT_MASK 0x000000f0L
|
||||
#define UVD_LMI_ADDR_EXT__CM_ADDR_EXT__SHIFT 0x00000004
|
||||
#define UVD_LMI_ADDR_EXT__IT_ADDR_EXT_MASK 0x00000f00L
|
||||
#define UVD_LMI_ADDR_EXT__IT_ADDR_EXT__SHIFT 0x00000008
|
||||
#define UVD_LMI_ADDR_EXT__MP_ADDR_EXT_MASK 0x00f00000L
|
||||
#define UVD_LMI_ADDR_EXT__MP_ADDR_EXT__SHIFT 0x00000014
|
||||
#define UVD_LMI_ADDR_EXT__RE_ADDR_EXT_MASK 0x000f0000L
|
||||
#define UVD_LMI_ADDR_EXT__RE_ADDR_EXT__SHIFT 0x00000010
|
||||
#define UVD_LMI_ADDR_EXT__VCPU_ADDR_EXT_MASK 0x0000000fL
|
||||
#define UVD_LMI_ADDR_EXT__VCPU_ADDR_EXT__SHIFT 0x00000000
|
||||
#define UVD_LMI_ADDR_EXT__VCPU_NC0_ADDR_EXT_MASK 0x0f000000L
|
||||
#define UVD_LMI_ADDR_EXT__VCPU_NC0_ADDR_EXT__SHIFT 0x00000018
|
||||
#define UVD_LMI_ADDR_EXT__VCPU_NC1_ADDR_EXT_MASK 0xf0000000L
|
||||
#define UVD_LMI_ADDR_EXT__VCPU_NC1_ADDR_EXT__SHIFT 0x0000001c
|
||||
#define UVD_LMI_ADDR_EXT__VCPU_VM_ADDR_EXT_MASK 0x0000f000L
|
||||
#define UVD_LMI_ADDR_EXT__VCPU_VM_ADDR_EXT__SHIFT 0x0000000c
|
||||
#define UVD_LMI_CACHE_CTRL__CM_EN_MASK 0x00000004L
|
||||
#define UVD_LMI_CACHE_CTRL__CM_EN__SHIFT 0x00000002
|
||||
#define UVD_LMI_CACHE_CTRL__CM_FLUSH_MASK 0x00000008L
|
||||
#define UVD_LMI_CACHE_CTRL__CM_FLUSH__SHIFT 0x00000003
|
||||
#define UVD_LMI_CACHE_CTRL__IT_EN_MASK 0x00000001L
|
||||
#define UVD_LMI_CACHE_CTRL__IT_EN__SHIFT 0x00000000
|
||||
#define UVD_LMI_CACHE_CTRL__IT_FLUSH_MASK 0x00000002L
|
||||
#define UVD_LMI_CACHE_CTRL__IT_FLUSH__SHIFT 0x00000001
|
||||
#define UVD_LMI_CACHE_CTRL__VCPU_EN_MASK 0x00000010L
|
||||
#define UVD_LMI_CACHE_CTRL__VCPU_EN__SHIFT 0x00000004
|
||||
#define UVD_LMI_CACHE_CTRL__VCPU_FLUSH_MASK 0x00000020L
|
||||
#define UVD_LMI_CACHE_CTRL__VCPU_FLUSH__SHIFT 0x00000005
|
||||
#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
|
||||
#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x00000002
|
||||
#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L
|
||||
#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x00000007
|
||||
#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L
|
||||
#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x00000003
|
||||
#define UVD_LMI_CTRL2__MCIF_WR_WATERMARK_MASK 0x00000070L
|
||||
#define UVD_LMI_CTRL2__MCIF_WR_WATERMARK__SHIFT 0x00000004
|
||||
#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
|
||||
#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x00000009
|
||||
#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
|
||||
#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0x0000000b
|
||||
#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
|
||||
#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x00000000
|
||||
#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L
|
||||
#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0x0000000f
|
||||
#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
|
||||
#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x00000001
|
||||
#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
|
||||
#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x00000008
|
||||
#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L
|
||||
#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0x0000000d
|
||||
#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L
|
||||
#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0x0000000e
|
||||
#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L
|
||||
#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x0000000b
|
||||
#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L
|
||||
#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x00000016
|
||||
#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L
|
||||
#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0x0000000e
|
||||
#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000f8000L
|
||||
#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0x0000000f
|
||||
#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L
|
||||
#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0x0000000d
|
||||
#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L
|
||||
#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x00000017
|
||||
#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
|
||||
#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x00000018
|
||||
#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK 0x00100000L
|
||||
#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL__SHIFT 0x00000014
|
||||
#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
|
||||
#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x00000019
|
||||
#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L
|
||||
#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0x0000000c
|
||||
#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L
|
||||
#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x0000001a
|
||||
#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
|
||||
#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x00000009
|
||||
#define UVD_LMI_CTRL__RFU_MASK 0xf8000000L
|
||||
#define UVD_LMI_CTRL__RFU_MASK 0xfc000000L
|
||||
#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001a
|
||||
#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001b
|
||||
#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
|
||||
#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015
|
||||
#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L
|
||||
#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x00000008
|
||||
#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000ffL
|
||||
#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x00000000
|
||||
#define UVD_LMI_EXT40_ADDR__ADDR_MASK 0x000000ffL
|
||||
#define UVD_LMI_EXT40_ADDR__ADDR__SHIFT 0x00000000
|
||||
#define UVD_LMI_EXT40_ADDR__INDEX_MASK 0x001f0000L
|
||||
#define UVD_LMI_EXT40_ADDR__INDEX__SHIFT 0x00000010
|
||||
#define UVD_LMI_EXT40_ADDR__WRITE_ADDR_MASK 0x80000000L
|
||||
#define UVD_LMI_EXT40_ADDR__WRITE_ADDR__SHIFT 0x0000001f
|
||||
#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L
|
||||
#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0x0000000c
|
||||
#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L
|
||||
#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0x0000000d
|
||||
#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L
|
||||
#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x00000007
|
||||
#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
|
||||
#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L
|
||||
#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x00000008
|
||||
#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x00000000
|
||||
#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L
|
||||
#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0x0000000b
|
||||
#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L
|
||||
#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
|
||||
#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x00000009
|
||||
#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x00000004
|
||||
#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L
|
||||
#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0x0000000a
|
||||
#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L
|
||||
#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
|
||||
#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x00000006
|
||||
#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x00000005
|
||||
#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
|
||||
#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x00000003
|
||||
#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
|
||||
#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
|
||||
#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x00000002
|
||||
#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x00000001
|
||||
#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP_MASK 0x00000003L
|
||||
#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP__SHIFT 0x00000000
|
||||
#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP_MASK 0x0000000cL
|
||||
#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP__SHIFT 0x00000002
|
||||
#define UVD_LMI_SWAP_CNTL__CM_MC_SWAP_MASK 0x00000c00L
|
||||
#define UVD_LMI_SWAP_CNTL__CM_MC_SWAP__SHIFT 0x0000000a
|
||||
#define UVD_LMI_SWAP_CNTL__CSM_MC_SWAP_MASK 0x000c0000L
|
||||
#define UVD_LMI_SWAP_CNTL__CSM_MC_SWAP__SHIFT 0x00000012
|
||||
#define UVD_LMI_SWAP_CNTL__DB_R_MC_SWAP_MASK 0x0000c000L
|
||||
#define UVD_LMI_SWAP_CNTL__DB_R_MC_SWAP__SHIFT 0x0000000e
|
||||
#define UVD_LMI_SWAP_CNTL__DB_W_MC_SWAP_MASK 0x00030000L
|
||||
#define UVD_LMI_SWAP_CNTL__DBW_MC_SWAP_MASK 0x03000000L
|
||||
#define UVD_LMI_SWAP_CNTL__DB_W_MC_SWAP__SHIFT 0x00000010
|
||||
#define UVD_LMI_SWAP_CNTL__DBW_MC_SWAP__SHIFT 0x00000018
|
||||
#define UVD_LMI_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000cL
|
||||
#define UVD_LMI_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x00000002
|
||||
#define UVD_LMI_SWAP_CNTL__IT_MC_SWAP_MASK 0x00003000L
|
||||
#define UVD_LMI_SWAP_CNTL__IT_MC_SWAP__SHIFT 0x0000000c
|
||||
#define UVD_LMI_SWAP_CNTL__MP_MC_SWAP_MASK 0xc0000000L
|
||||
#define UVD_LMI_SWAP_CNTL__MP_MC_SWAP__SHIFT 0x0000001e
|
||||
#define UVD_LMI_SWAP_CNTL__MP_REF16_MC_SWAP_MASK 0x00c00000L
|
||||
#define UVD_LMI_SWAP_CNTL__MP_REF16_MC_SWAP__SHIFT 0x00000016
|
||||
#define UVD_LMI_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
|
||||
#define UVD_LMI_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x00000000
|
||||
#define UVD_LMI_SWAP_CNTL__RB_RPTR_MC_SWAP_MASK 0x00000030L
|
||||
#define UVD_LMI_SWAP_CNTL__RB_RPTR_MC_SWAP__SHIFT 0x00000004
|
||||
#define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP_MASK 0x0c000000L
|
||||
#define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP__SHIFT 0x0000001a
|
||||
#define UVD_LMI_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
|
||||
#define UVD_LMI_SWAP_CNTL__RE_MC_SWAP__SHIFT 0x0000001c
|
||||
#define UVD_LMI_SWAP_CNTL__VCPU_R_MC_SWAP_MASK 0x000000c0L
|
||||
#define UVD_LMI_SWAP_CNTL__VCPU_R_MC_SWAP__SHIFT 0x00000006
|
||||
#define UVD_LMI_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000300L
|
||||
#define UVD_LMI_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x00000008
|
||||
#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007ffff0L
|
||||
#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x00000004
|
||||
#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
|
||||
#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x00000000
|
||||
#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
|
||||
#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x00000002
|
||||
#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
|
||||
#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x00000001
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
|
||||
#define UVD_MIF_CURR_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
|
||||
#define UVD_MIF_RECON1_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
|
||||
#define UVD_MIF_REF_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
|
||||
#define UVD_MPC_CNTL__AVE_WEIGHT_MASK 0x00030000L
|
||||
#define UVD_MPC_CNTL__AVE_WEIGHT__SHIFT 0x00000010
|
||||
#define UVD_MPC_CNTL__DBG_MUX_MASK 0x00000700L
|
||||
#define UVD_MPC_CNTL__DBG_MUX__SHIFT 0x00000008
|
||||
#define UVD_MPC_CNTL__PERF_RST_MASK 0x00000040L
|
||||
#define UVD_MPC_CNTL__PERF_RST__SHIFT 0x00000006
|
||||
#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L
|
||||
#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x00000003
|
||||
#define UVD_MPC_CNTL__URGENT_EN_MASK 0x00040000L
|
||||
#define UVD_MPC_CNTL__URGENT_EN__SHIFT 0x00000012
|
||||
#define UVD_MPC_SET_ALU__FUNCT_MASK 0x00000007L
|
||||
#define UVD_MPC_SET_ALU__FUNCT__SHIFT 0x00000000
|
||||
#define UVD_MPC_SET_ALU__OPERAND_MASK 0x00000ff0L
|
||||
#define UVD_MPC_SET_ALU__OPERAND__SHIFT 0x00000004
|
||||
#define UVD_MPC_SET_MUXA0__VARA_0_MASK 0x0000003fL
|
||||
#define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x00000000
|
||||
#define UVD_MPC_SET_MUXA0__VARA_1_MASK 0x00000fc0L
|
||||
#define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x00000006
|
||||
#define UVD_MPC_SET_MUXA0__VARA_2_MASK 0x0003f000L
|
||||
#define UVD_MPC_SET_MUXA0__VARA_2__SHIFT 0x0000000c
|
||||
#define UVD_MPC_SET_MUXA0__VARA_3_MASK 0x00fc0000L
|
||||
#define UVD_MPC_SET_MUXA0__VARA_3__SHIFT 0x00000012
|
||||
#define UVD_MPC_SET_MUXA0__VARA_4_MASK 0x3f000000L
|
||||
#define UVD_MPC_SET_MUXA0__VARA_4__SHIFT 0x00000018
|
||||
#define UVD_MPC_SET_MUXA1__VARA_5_MASK 0x0000003fL
|
||||
#define UVD_MPC_SET_MUXA1__VARA_5__SHIFT 0x00000000
|
||||
#define UVD_MPC_SET_MUXA1__VARA_6_MASK 0x00000fc0L
|
||||
#define UVD_MPC_SET_MUXA1__VARA_6__SHIFT 0x00000006
|
||||
#define UVD_MPC_SET_MUXA1__VARA_7_MASK 0x0003f000L
|
||||
#define UVD_MPC_SET_MUXA1__VARA_7__SHIFT 0x0000000c
|
||||
#define UVD_MPC_SET_MUXB0__VARB_0_MASK 0x0000003fL
|
||||
#define UVD_MPC_SET_MUXB0__VARB_0__SHIFT 0x00000000
|
||||
#define UVD_MPC_SET_MUXB0__VARB_1_MASK 0x00000fc0L
|
||||
#define UVD_MPC_SET_MUXB0__VARB_1__SHIFT 0x00000006
|
||||
#define UVD_MPC_SET_MUXB0__VARB_2_MASK 0x0003f000L
|
||||
#define UVD_MPC_SET_MUXB0__VARB_2__SHIFT 0x0000000c
|
||||
#define UVD_MPC_SET_MUXB0__VARB_3_MASK 0x00fc0000L
|
||||
#define UVD_MPC_SET_MUXB0__VARB_3__SHIFT 0x00000012
|
||||
#define UVD_MPC_SET_MUXB0__VARB_4_MASK 0x3f000000L
|
||||
#define UVD_MPC_SET_MUXB0__VARB_4__SHIFT 0x00000018
|
||||
#define UVD_MPC_SET_MUXB1__VARB_5_MASK 0x0000003fL
|
||||
#define UVD_MPC_SET_MUXB1__VARB_5__SHIFT 0x00000000
|
||||
#define UVD_MPC_SET_MUXB1__VARB_6_MASK 0x00000fc0L
|
||||
#define UVD_MPC_SET_MUXB1__VARB_6__SHIFT 0x00000006
|
||||
#define UVD_MPC_SET_MUXB1__VARB_7_MASK 0x0003f000L
|
||||
#define UVD_MPC_SET_MUXB1__VARB_7__SHIFT 0x0000000c
|
||||
#define UVD_MPC_SET_MUX__SET_0_MASK 0x00000007L
|
||||
#define UVD_MPC_SET_MUX__SET_0__SHIFT 0x00000000
|
||||
#define UVD_MPC_SET_MUX__SET_1_MASK 0x00000038L
|
||||
#define UVD_MPC_SET_MUX__SET_1__SHIFT 0x00000003
|
||||
#define UVD_MPC_SET_MUX__SET_2_MASK 0x000001c0L
|
||||
#define UVD_MPC_SET_MUX__SET_2__SHIFT 0x00000006
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP_MASK 0x00000003L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP__SHIFT 0x00000000
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP_MASK 0x00300000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP__SHIFT 0x00000014
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP_MASK 0x00c00000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP__SHIFT 0x00000016
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP_MASK 0x03000000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP__SHIFT 0x00000018
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP_MASK 0x0c000000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP__SHIFT 0x0000001a
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP_MASK 0x30000000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP__SHIFT 0x0000001c
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP_MASK 0xc0000000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP__SHIFT 0x0000001e
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP_MASK 0x0000000cL
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP__SHIFT 0x00000002
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP_MASK 0x00000030L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP__SHIFT 0x00000004
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP_MASK 0x000000c0L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP__SHIFT 0x00000006
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP_MASK 0x00000300L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP__SHIFT 0x00000008
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP_MASK 0x00000c00L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP__SHIFT 0x0000000a
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP_MASK 0x00003000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP__SHIFT 0x0000000c
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP_MASK 0x0000c000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP__SHIFT 0x0000000e
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP_MASK 0x00030000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP__SHIFT 0x00000010
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP_MASK 0x000c0000L
|
||||
#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP__SHIFT 0x00000012
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK 0x000000ffL
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR__SHIFT 0x00000000
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK 0x00000400L
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT__SHIFT 0x0000000a
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_P2_SELECT_MASK 0x00000800L
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_P2_SELECT__SHIFT 0x0000000b
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK 0x00000100L
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN__SHIFT 0x00000008
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK 0x00000200L
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP__SHIFT 0x00000009
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_READ_MASK 0x00002000L
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_READ__SHIFT 0x0000000d
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_REG_ADDR_MASK 0xf0000000L
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_REG_ADDR__SHIFT 0x0000001c
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_WRITE_MASK 0x00001000L
|
||||
#define UVD_PGFSM_CONFIG__UVD_PGFSM_WRITE__SHIFT 0x0000000c
|
||||
#define UVD_PGFSM_READ_TILE1__UVD_PGFSM_READ_TILE1_VALUE_MASK 0x00ffffffL
|
||||
#define UVD_PGFSM_READ_TILE1__UVD_PGFSM_READ_TILE1_VALUE__SHIFT 0x00000000
|
||||
#define UVD_PGFSM_READ_TILE2__UVD_PGFSM_READ_TILE2_VALUE_MASK 0x00ffffffL
|
||||
#define UVD_PGFSM_READ_TILE2__UVD_PGFSM_READ_TILE2_VALUE__SHIFT 0x00000000
|
||||
#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000001L
|
||||
#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x00000000
|
||||
#define UVD_RBC_IB_BASE__IB_BASE_MASK 0xffffffc0L
|
||||
#define UVD_RBC_IB_BASE__IB_BASE__SHIFT 0x00000006
|
||||
#define UVD_RBC_IB_SIZE__IB_SIZE_MASK 0x007ffff0L
|
||||
#define UVD_RBC_IB_SIZE__IB_SIZE__SHIFT 0x00000004
|
||||
#define UVD_RBC_RB_BASE__RB_BASE_MASK 0xffffffc0L
|
||||
#define UVD_RBC_RB_BASE__RB_BASE__SHIFT 0x00000006
|
||||
#define UVD_RBC_RB_CNTL__RB_BLKSZ_MASK 0x00001f00L
|
||||
#define UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
|
||||
#define UVD_RBC_RB_CNTL__RB_BUFSZ_MASK 0x0000001fL
|
||||
#define UVD_RBC_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
|
||||
#define UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK 0x00010000L
|
||||
#define UVD_RBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x00000010
|
||||
#define UVD_RBC_RB_CNTL__RB_NO_UPDATE_MASK 0x01000000L
|
||||
#define UVD_RBC_RB_CNTL__RB_NO_UPDATE__SHIFT 0x00000018
|
||||
#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x10000000L
|
||||
#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x0000001c
|
||||
#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN_MASK 0x00100000L
|
||||
#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN__SHIFT 0x00000014
|
||||
#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xffffffffL
|
||||
#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x00000000
|
||||
#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007ffff0L
|
||||
#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x00000004
|
||||
#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007ffff0L
|
||||
#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x00000004
|
||||
#define UVD_SEMA_ADDR_HIGH__ADDR_42_23_MASK 0x000fffffL
|
||||
#define UVD_SEMA_ADDR_HIGH__ADDR_42_23__SHIFT 0x00000000
|
||||
#define UVD_SEMA_ADDR_LOW__ADDR_22_3_MASK 0x000fffffL
|
||||
#define UVD_SEMA_ADDR_LOW__ADDR_22_3__SHIFT 0x00000000
|
||||
#define UVD_SEMA_CMD__MODE_MASK 0x00000040L
|
||||
#define UVD_SEMA_CMD__MODE__SHIFT 0x00000006
|
||||
#define UVD_SEMA_CMD__REQ_CMD_MASK 0x0000000fL
|
||||
#define UVD_SEMA_CMD__REQ_CMD__SHIFT 0x00000000
|
||||
#define UVD_SEMA_CMD__VMID_EN_MASK 0x00000080L
|
||||
#define UVD_SEMA_CMD__VMID_EN__SHIFT 0x00000007
|
||||
#define UVD_SEMA_CMD__VMID_MASK 0x00000f00L
|
||||
#define UVD_SEMA_CMD__VMID__SHIFT 0x00000008
|
||||
#define UVD_SEMA_CMD__WR_PHASE_MASK 0x00000030L
|
||||
#define UVD_SEMA_CMD__WR_PHASE__SHIFT 0x00000004
|
||||
#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS_MASK 0x00000002L
|
||||
#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS__SHIFT 0x00000001
|
||||
#define UVD_SEMA_CNTL__SEMAPHORE_EN_MASK 0x00000001L
|
||||
#define UVD_SEMA_CNTL__SEMAPHORE_EN__SHIFT 0x00000000
|
||||
#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
|
||||
#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
|
||||
#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT_MASK 0x001ffffeL
|
||||
#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT__SHIFT 0x00000001
|
||||
#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN_MASK 0x00000001L
|
||||
#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN__SHIFT 0x00000000
|
||||
#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000004L
#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x00000002
#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR_MASK 0x00000008L
#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR__SHIFT 0x00000003
#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT_MASK 0x00000002L
#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT__SHIFT 0x00000001
#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000001L
#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x00000000
#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT_MASK 0x001ffffeL
#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT__SHIFT 0x00000001
#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN_MASK 0x00000001L
#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN__SHIFT 0x00000000
#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x00000018
#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT_MASK 0x001ffffeL
#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT__SHIFT 0x00000001
#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN_MASK 0x00000001L
#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN__SHIFT 0x00000000
#define UVD_SOFT_RESET__CSM_SOFT_RESET_MASK 0x00000020L
#define UVD_SOFT_RESET__CSM_SOFT_RESET__SHIFT 0x00000005
#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L
#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x00000006
#define UVD_SOFT_RESET__FWV_SOFT_RESET_MASK 0x00000200L
#define UVD_SOFT_RESET__FWV_SOFT_RESET__SHIFT 0x00000009
#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L
#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0x0000000c
#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L
#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0x0000000a
#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L
#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x00000001
#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L
#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x00000010
#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L
#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x00000002
#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L
#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0x0000000d
#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L
#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0x0000000f
#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L
#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x00000008
#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L
#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0x0000000b
#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L
#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x00000000
#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L
#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0x0000000e
#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L
#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x00000007
#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L
#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x00000004
#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L
#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x00000003
#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L
#define UVD_STATUS__RBC_BUSY__SHIFT 0x00000000
#define UVD_STATUS__VCPU_REPORT_MASK 0x000000feL
#define UVD_STATUS__VCPU_REPORT__SHIFT 0x00000001
#define UVD_UDEC_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
#define UVD_UDEC_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
#define UVD_UDEC_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
#define UVD_UDEC_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
#define UVD_UDEC_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
#define UVD_UDEC_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
#define UVD_UDEC_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
#define UVD_UDEC_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
#define UVD_UDEC_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
#define UVD_UDEC_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
#define UVD_UDEC_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
#define UVD_UDEC_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
#define UVD_UDEC_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
#define UVD_UDEC_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
#define UVD_UDEC_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
#define UVD_UDEC_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
#define UVD_UDEC_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
#define UVD_UDEC_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
#define UVD_UDEC_DB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
#define UVD_UDEC_DB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
#define UVD_UDEC_DB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
#define UVD_UDEC_DB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
#define UVD_UDEC_DB_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
#define UVD_UDEC_DB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
#define UVD_UDEC_DB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
#define UVD_UDEC_DB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
#define UVD_UDEC_DB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
#define UVD_UDEC_DB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
#define UVD_UDEC_DB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
#define UVD_UDEC_DB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
#define UVD_UDEC_DB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
#define UVD_UDEC_DB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
#define UVD_UDEC_DB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
#define UVD_UDEC_DB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
#define UVD_UDEC_DB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
#define UVD_UDEC_DB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
#define UVD_UDEC_DBW_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
#define UVD_UDEC_DBW_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x00000008
#define UVD_UDEC_DBW_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
#define UVD_UDEC_DBW_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x00000018
#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_GPUS_MASK 0x00700000L
#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_GPUS__SHIFT 0x00000014
#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x0000001e
#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_PIPES__SHIFT 0x00000000
#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00003000L
#define UVD_UDEC_DBW_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x0000000c
#define UVD_UDEC_DBW_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
#define UVD_UDEC_DBW_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x00000004
#define UVD_UDEC_DBW_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
#define UVD_UDEC_DBW_ADDR_CONFIG__ROW_SIZE__SHIFT 0x0000001c
#define UVD_UDEC_DBW_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
#define UVD_UDEC_DBW_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x00000010
#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01ffffffL
#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x00000000
#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x01ffffffL
#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x00000000
#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x01ffffffL
#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x00000000
#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001fffffL
#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x00000000
#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001fffffL
#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x00000000
#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001fffffL
#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x00000000
#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L
#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x00000008
#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000010L
#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x00000004
#define UVD_VCPU_CNTL__CABAC_MB_ACC_MASK 0x10000000L
#define UVD_VCPU_CNTL__CABAC_MB_ACC__SHIFT 0x0000001c
#define UVD_VCPU_CNTL__CLK_ACTIVE_MASK 0x00020000L
#define UVD_VCPU_CNTL__CLK_ACTIVE__SHIFT 0x00000011
#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x00000009
#define UVD_VCPU_CNTL__DBG_MUX_MASK 0x0000e000L
#define UVD_VCPU_CNTL__DBG_MUX__SHIFT 0x0000000d
#define UVD_VCPU_CNTL__ECPU_AM32_EN_MASK 0x20000000L
#define UVD_VCPU_CNTL__ECPU_AM32_EN__SHIFT 0x0000001d
#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000fL
#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x00000000
#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L
#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x00000010
#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L
#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x00000005
#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L
#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x00000006
#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0ff00000L
#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x00000014
#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L
#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000007
#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L
#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x00000012
#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L
#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0x0000000a
#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L
#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0x0000000b
#define UVD_VCPU_CNTL__WMV9_EN_MASK 0x40000000L
#define UVD_VCPU_CNTL__WMV9_EN__SHIFT 0x0000001e

#endif
|
|
@ -0,0 +1,64 @@
|
|||
/*
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef VCE_1_0_D_H
#define VCE_1_0_D_H

#define mmVCE_CLOCK_GATING_A 0x80BE
#define mmVCE_CLOCK_GATING_B 0x80BF
#define mmVCE_LMI_CACHE_CTRL 0x83BD
#define mmVCE_LMI_CTRL 0x83A6
#define mmVCE_LMI_CTRL2 0x839D
#define mmVCE_LMI_MISC_CTRL 0x83B5
#define mmVCE_LMI_STATUS 0x83A7
#define mmVCE_LMI_SWAP_CNTL 0x83AD
#define mmVCE_LMI_SWAP_CNTL1 0x83AE
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR 0x8397
#define mmVCE_LMI_VM_CTRL 0x83A8
#define mmVCE_RB_ARB_CTRL 0x809F
#define mmVCE_RB_BASE_HI 0x8061
#define mmVCE_RB_BASE_HI2 0x805C
#define mmVCE_RB_BASE_LO 0x8060
#define mmVCE_RB_BASE_LO2 0x805B
#define mmVCE_RB_RPTR 0x8063
#define mmVCE_RB_RPTR2 0x805E
#define mmVCE_RB_SIZE 0x8062
#define mmVCE_RB_SIZE2 0x805D
#define mmVCE_RB_WPTR 0x8064
#define mmVCE_RB_WPTR2 0x805F
#define mmVCE_SOFT_RESET 0x8048
#define mmVCE_STATUS 0x8001
#define mmVCE_SYS_INT_ACK 0x8341
#define mmVCE_SYS_INT_EN 0x8340
#define mmVCE_SYS_INT_STATUS 0x8341
#define mmVCE_UENC_CLOCK_GATING 0x816F
#define mmVCE_UENC_DMA_DCLK_CTRL 0x8250
#define mmVCE_UENC_REG_CLOCK_GATING 0x8170
#define mmVCE_VCPU_CACHE_OFFSET0 0x8009
#define mmVCE_VCPU_CACHE_OFFSET1 0x800B
#define mmVCE_VCPU_CACHE_OFFSET2 0x800D
#define mmVCE_VCPU_CACHE_SIZE0 0x800A
#define mmVCE_VCPU_CACHE_SIZE1 0x800C
#define mmVCE_VCPU_CACHE_SIZE2 0x800E
#define mmVCE_VCPU_CNTL 0x8005

#endif
|
|
@ -0,0 +1,99 @@
|
|||
/*
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef VCE_1_0_SH_MASK_H
#define VCE_1_0_SH_MASK_H

#define VCE_LMI_CACHE_CTRL__VCPU_EN_MASK 0x00000001L
#define VCE_LMI_CACHE_CTRL__VCPU_EN__SHIFT 0x00000000
#define VCE_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
#define VCE_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x00000008
#define VCE_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
#define VCE_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015
#define VCE_LMI_SWAP_CNTL1__RD_MC_CID_SWAP_MASK 0x00003ffcL
#define VCE_LMI_SWAP_CNTL1__RD_MC_CID_SWAP__SHIFT 0x00000002
#define VCE_LMI_SWAP_CNTL1__VCPU_R_MC_SWAP_MASK 0x00000003L
#define VCE_LMI_SWAP_CNTL1__VCPU_R_MC_SWAP__SHIFT 0x00000000
#define VCE_LMI_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000003L
#define VCE_LMI_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x00000000
#define VCE_LMI_SWAP_CNTL__WR_MC_CID_SWAP_MASK 0x00003ffcL
#define VCE_LMI_SWAP_CNTL__WR_MC_CID_SWAP__SHIFT 0x00000002
#define VCE_LMI_VCPU_CACHE_40BIT_BAR__BAR_MASK 0xffffffffL
#define VCE_LMI_VCPU_CACHE_40BIT_BAR__BAR__SHIFT 0x00000000
#define VCE_RB_BASE_HI2__RB_BASE_HI_MASK 0xffffffffL
#define VCE_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x00000000
#define VCE_RB_BASE_HI__RB_BASE_HI_MASK 0xffffffffL
#define VCE_RB_BASE_HI__RB_BASE_HI__SHIFT 0x00000000
#define VCE_RB_BASE_LO2__RB_BASE_LO_MASK 0xffffffc0L
#define VCE_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x00000006
#define VCE_RB_BASE_LO__RB_BASE_LO_MASK 0xffffffc0L
#define VCE_RB_BASE_LO__RB_BASE_LO__SHIFT 0x00000006
#define VCE_RB_RPTR2__RB_RPTR_MASK 0x007ffff0L
#define VCE_RB_RPTR2__RB_RPTR__SHIFT 0x00000004
#define VCE_RB_RPTR__RB_RPTR_MASK 0x007ffff0L
#define VCE_RB_RPTR__RB_RPTR__SHIFT 0x00000004
#define VCE_RB_SIZE2__RB_SIZE_MASK 0x007ffff0L
#define VCE_RB_SIZE2__RB_SIZE__SHIFT 0x00000004
#define VCE_RB_SIZE__RB_SIZE_MASK 0x007ffff0L
#define VCE_RB_SIZE__RB_SIZE__SHIFT 0x00000004
#define VCE_RB_WPTR2__RB_WPTR_MASK 0x007ffff0L
#define VCE_RB_WPTR2__RB_WPTR__SHIFT 0x00000004
#define VCE_RB_WPTR__RB_WPTR_MASK 0x007ffff0L
#define VCE_RB_WPTR__RB_WPTR__SHIFT 0x00000004
#define VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK 0x00000001L
#define VCE_SOFT_RESET__ECPU_SOFT_RESET__SHIFT 0x00000000
#define VCE_STATUS__JOB_BUSY_MASK 0x00000001L
#define VCE_STATUS__JOB_BUSY__SHIFT 0x00000000
#define VCE_STATUS__UENC_BUSY_MASK 0x00000100L
#define VCE_STATUS__UENC_BUSY__SHIFT 0x00000008
#define VCE_STATUS__VCPU_REPORT_MASK 0x000000feL
#define VCE_STATUS__VCPU_REPORT__SHIFT 0x00000001
#define VCE_SYS_INT_ACK__VCE_SYS_INT_TRAP_INTERRUPT_ACK_MASK 0x00000008L
#define VCE_SYS_INT_ACK__VCE_SYS_INT_TRAP_INTERRUPT_ACK__SHIFT 0x00000003
#define VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK 0x00000008L
#define VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN__SHIFT 0x00000003
#define VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK 0x00000008L
#define VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT__SHIFT 0x00000003
#define VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK 0x00000002L
#define VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON__SHIFT 0x00000001
#define VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK 0x00000004L
#define VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON__SHIFT 0x00000002
#define VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK 0x00000001L
#define VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON__SHIFT 0x00000000
#define VCE_VCPU_CACHE_OFFSET0__OFFSET_MASK 0x0fffffffL
#define VCE_VCPU_CACHE_OFFSET0__OFFSET__SHIFT 0x00000000
#define VCE_VCPU_CACHE_OFFSET1__OFFSET_MASK 0x0fffffffL
#define VCE_VCPU_CACHE_OFFSET1__OFFSET__SHIFT 0x00000000
#define VCE_VCPU_CACHE_OFFSET2__OFFSET_MASK 0x0fffffffL
#define VCE_VCPU_CACHE_OFFSET2__OFFSET__SHIFT 0x00000000
#define VCE_VCPU_CACHE_SIZE0__SIZE_MASK 0x00ffffffL
#define VCE_VCPU_CACHE_SIZE0__SIZE__SHIFT 0x00000000
#define VCE_VCPU_CACHE_SIZE1__SIZE_MASK 0x00ffffffL
#define VCE_VCPU_CACHE_SIZE1__SIZE__SHIFT 0x00000000
#define VCE_VCPU_CACHE_SIZE2__SIZE_MASK 0x00ffffffL
#define VCE_VCPU_CACHE_SIZE2__SIZE__SHIFT 0x00000000
#define VCE_VCPU_CNTL__CLK_EN_MASK 0x00000001L
#define VCE_VCPU_CNTL__CLK_EN__SHIFT 0x00000000
#define VCE_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00040000L
#define VCE_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000012

#endif
|
|
@ -41,7 +41,7 @@
|
|||
#define PP_CHECK_HW(hwmgr) \
|
||||
do { \
|
||||
if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL) \
|
||||
return -EINVAL; \
|
||||
return 0; \
|
||||
} while (0)
|
||||
|
||||
static int pp_early_init(void *handle)
|
||||
|
@ -115,6 +115,7 @@ static int pp_hw_init(void *handle)
|
|||
struct pp_instance *pp_handle;
|
||||
struct pp_smumgr *smumgr;
|
||||
struct pp_eventmgr *eventmgr;
|
||||
struct pp_hwmgr *hwmgr;
|
||||
int ret = 0;
|
||||
|
||||
if (handle == NULL)
|
||||
|
@ -122,6 +123,7 @@ static int pp_hw_init(void *handle)
|
|||
|
||||
pp_handle = (struct pp_instance *)handle;
|
||||
smumgr = pp_handle->smu_mgr;
|
||||
hwmgr = pp_handle->hwmgr;
|
||||
|
||||
if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
|
||||
smumgr->smumgr_funcs->smu_init == NULL ||
|
||||
|
@ -141,9 +143,11 @@ static int pp_hw_init(void *handle)
|
|||
return ret;
|
||||
}
|
||||
|
||||
hw_init_power_state_table(pp_handle->hwmgr);
|
||||
eventmgr = pp_handle->eventmgr;
|
||||
PP_CHECK_HW(hwmgr);
|
||||
|
||||
hw_init_power_state_table(hwmgr);
|
||||
|
||||
eventmgr = pp_handle->eventmgr;
|
||||
if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -243,7 +247,9 @@ static int pp_suspend(void *handle)
|
|||
|
||||
pp_handle = (struct pp_instance *)handle;
|
||||
eventmgr = pp_handle->eventmgr;
|
||||
pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
|
||||
|
||||
if (eventmgr != NULL)
|
||||
pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -273,7 +279,8 @@ static int pp_resume(void *handle)
|
|||
}
|
||||
|
||||
eventmgr = pp_handle->eventmgr;
|
||||
pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
|
||||
if (eventmgr != NULL)
|
||||
pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -340,8 +347,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
|
|||
|
||||
hwmgr = ((struct pp_instance *)handle)->hwmgr;
|
||||
|
||||
if (hwmgr == NULL)
|
||||
return -EINVAL;
|
||||
PP_CHECK_HW(hwmgr);
|
||||
|
||||
return (((struct pp_instance *)handle)->hwmgr->dpm_level);
|
||||
}
|
||||
|
@ -448,6 +454,9 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
|
|||
if (pp_handle == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (pp_handle->eventmgr == NULL)
|
||||
return 0;
|
||||
|
||||
switch (event_id) {
|
||||
case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
|
||||
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
|
||||
|
@ -582,6 +591,23 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
|
|||
return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
|
||||
}
|
||||
|
||||
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
|
||||
{
|
||||
struct pp_hwmgr *hwmgr;
|
||||
|
||||
if (handle == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
hwmgr = ((struct pp_instance *)handle)->hwmgr;
|
||||
|
||||
PP_CHECK_HW(hwmgr);
|
||||
|
||||
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
|
||||
}
|
||||
|
||||
static int pp_dpm_get_temperature(void *handle)
|
||||
{
|
||||
struct pp_hwmgr *hwmgr;
|
||||
|
@ -852,6 +878,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
|
|||
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
|
||||
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
|
||||
.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
|
||||
.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
|
||||
.get_pp_num_states = pp_dpm_get_pp_num_states,
|
||||
.get_pp_table = pp_dpm_get_pp_table,
|
||||
.set_pp_table = pp_dpm_set_pp_table,
|
||||
|
@ -881,6 +908,13 @@ static int amd_pp_instance_init(struct amd_pp_init *pp_init,
|
|||
if (ret)
|
||||
goto fail_smum;
|
||||
|
||||
|
||||
amd_pp->pp_handle = handle;
|
||||
|
||||
if ((amdgpu_dpm == 0)
|
||||
|| cgs_is_virtualization_enabled(pp_init->device))
|
||||
return 0;
|
||||
|
||||
ret = hwmgr_init(pp_init, handle);
|
||||
if (ret)
|
||||
goto fail_hwmgr;
|
||||
|
@ -889,7 +923,6 @@ static int amd_pp_instance_init(struct amd_pp_init *pp_init,
|
|||
if (ret)
|
||||
goto fail_eventmgr;
|
||||
|
||||
amd_pp->pp_handle = handle;
|
||||
return 0;
|
||||
|
||||
fail_eventmgr:
|
||||
|
@ -908,12 +941,13 @@ static int amd_pp_instance_fini(void *handle)
|
|||
if (instance == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
eventmgr_fini(instance->eventmgr);
|
||||
|
||||
hwmgr_fini(instance->hwmgr);
|
||||
if ((amdgpu_dpm != 0)
|
||||
&& !cgs_is_virtualization_enabled(instance->smu_mgr->device)) {
|
||||
eventmgr_fini(instance->eventmgr);
|
||||
hwmgr_fini(instance->hwmgr);
|
||||
}
|
||||
|
||||
smum_fini(instance->smu_mgr);
|
||||
|
||||
kfree(handle);
|
||||
return 0;
|
||||
}
|
||||
|
@ -972,6 +1006,10 @@ int amd_powerplay_reset(void *handle)
|
|||
|
||||
hw_init_power_state_table(instance->hwmgr);
|
||||
|
||||
if ((amdgpu_dpm == 0)
|
||||
|| cgs_is_virtualization_enabled(instance->smu_mgr->device))
|
||||
return 0;
|
||||
|
||||
if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -993,6 +1031,8 @@ int amd_powerplay_display_configuration_change(void *handle,
|
|||
|
||||
hwmgr = ((struct pp_instance *)handle)->hwmgr;
|
||||
|
||||
PP_CHECK_HW(hwmgr);
|
||||
|
||||
phm_store_dal_configuration_data(hwmgr, display_config);
|
||||
|
||||
return 0;
|
||||
|
@ -1010,6 +1050,8 @@ int amd_powerplay_get_display_power_level(void *handle,
|
|||
|
||||
hwmgr = ((struct pp_instance *)handle)->hwmgr;
|
||||
|
||||
PP_CHECK_HW(hwmgr);
|
||||
|
||||
return phm_get_dal_power_level(hwmgr, output);
|
||||
}
|
||||
|
||||
|
@ -1027,6 +1069,8 @@ int amd_powerplay_get_current_clocks(void *handle,
|
|||
|
||||
hwmgr = ((struct pp_instance *)handle)->hwmgr;
|
||||
|
||||
PP_CHECK_HW(hwmgr);
|
||||
|
||||
phm_get_dal_power_level(hwmgr, &simple_clocks);
|
||||
|
||||
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) {
|
||||
|
@ -1071,6 +1115,8 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s
|
|||
|
||||
hwmgr = ((struct pp_instance *)handle)->hwmgr;
|
||||
|
||||
PP_CHECK_HW(hwmgr);
|
||||
|
||||
result = phm_get_clock_by_type(hwmgr, type, clocks);
|
||||
|
||||
return result;
|
||||
|
@ -1089,6 +1135,8 @@ int amd_powerplay_get_display_mode_validation_clocks(void *handle,
|
|||
|
||||
hwmgr = ((struct pp_instance *)handle)->hwmgr;
|
||||
|
||||
PP_CHECK_HW(hwmgr);
|
||||
|
||||
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
|
||||
result = phm_get_max_high_clocks(hwmgr, clocks);
|
||||
|
||||
|
|
|
@ -169,7 +169,7 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
if (bgate) {
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
AMD_CG_STATE_GATE);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
|
@ -182,7 +182,7 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
AMD_CG_STATE_UNGATE);
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
AMD_PG_STATE_UNGATE);
|
||||
cz_dpm_update_uvd_dpm(hwmgr, false);
|
||||
}
|
||||
|
||||
|
|
|
@ -80,20 +80,17 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
|
|||
switch (hwmgr->chip_id) {
|
||||
case CHIP_TOPAZ:
|
||||
topaz_set_asic_special_caps(hwmgr);
|
||||
hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
|
||||
PP_VBI_TIME_SUPPORT_MASK |
|
||||
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
|
||||
PP_ENABLE_GFX_CG_THRU_SMU);
|
||||
hwmgr->pp_table_version = PP_TABLE_V0;
|
||||
break;
|
||||
case CHIP_TONGA:
|
||||
tonga_set_asic_special_caps(hwmgr);
|
||||
hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
|
||||
PP_VBI_TIME_SUPPORT_MASK);
|
||||
hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
|
||||
break;
|
||||
case CHIP_FIJI:
|
||||
fiji_set_asic_special_caps(hwmgr);
|
||||
hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
|
||||
PP_VBI_TIME_SUPPORT_MASK |
|
||||
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
|
||||
PP_ENABLE_GFX_CG_THRU_SMU);
|
||||
break;
|
||||
case CHIP_POLARIS11:
|
||||
|
@ -685,20 +682,24 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
|
|||
|
||||
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
if (amdgpu_sclk_deep_sleep_en)
|
||||
if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_SclkDeepSleep);
|
||||
else
|
||||
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_SclkDeepSleep);
|
||||
|
||||
if (amdgpu_powercontainment)
|
||||
if (amdgpu_pp_feature_mask & PP_POWER_CONTAINMENT_MASK) {
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_PowerContainment);
|
||||
else
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_CAC);
|
||||
} else {
|
||||
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_PowerContainment);
|
||||
|
||||
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_CAC);
|
||||
}
|
||||
hwmgr->feature_mask = amdgpu_pp_feature_mask;
|
||||
|
||||
return 0;
|
||||
|
@ -735,9 +736,6 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
|
|||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_TCPRamping);
|
||||
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_CAC);
|
||||
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_RegulatorHot);
|
||||
|
||||
|
@ -767,8 +765,6 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
|
|||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_TablelessHardwareInterface);
|
||||
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_CAC);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -791,9 +787,6 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
|
|||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_TablelessHardwareInterface);
|
||||
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_CAC);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -809,8 +802,6 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
|
|||
PHM_PlatformCaps_TCPRamping);
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_TablelessHardwareInterface);
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_CAC);
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_EVV);
|
||||
return 0;
|
||||
|
|
|
@ -149,7 +149,7 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
if (bgate) {
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
AMD_CG_STATE_GATE);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
|
@ -162,7 +162,7 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
AMD_CG_STATE_UNGATE);
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
AMD_CG_STATE_UNGATE);
|
||||
smu7_update_uvd_dpm(hwmgr, false);
|
||||
}
|
||||
|
||||
|
|
|
@ -993,13 +993,6 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
|
|||
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
|
||||
SWRST_COMMAND_1, RESETLC, 0x0);
|
||||
|
||||
PP_ASSERT_WITH_CODE(
|
||||
(0 == smum_send_msg_to_smc(hwmgr->smumgr,
|
||||
PPSMC_MSG_Voltage_Cntl_Enable)),
|
||||
"Failed to enable voltage DPM during DPM Start Function!",
|
||||
return -EINVAL);
|
||||
|
||||
|
||||
if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
|
||||
printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
|
||||
return -EINVAL;
|
||||
|
@ -1428,7 +1421,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
|||
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_ControlVDDCI);
|
||||
|
||||
if ((hwmgr->pp_table_version != PP_TABLE_V0)
|
||||
if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
|
||||
&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_ClockStretcher);
|
||||
|
@ -2008,8 +2001,9 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
|
|||
|
||||
hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
|
||||
table_info->cac_dtp_table->usTargetOperatingTemp;
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_ODFuzzyFanControlSupport);
|
||||
if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_ODFuzzyFanControlSupport);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -603,9 +603,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
|
||||
static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
|
||||
uint32_t target_tdp)
|
||||
{
|
||||
return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
|
||||
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
|
||||
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
|
||||
}
|
||||
|
||||
|
|
|
@ -29,6 +29,8 @@
|
|||
#include "amd_shared.h"
|
||||
#include "cgs_common.h"
|
||||
|
||||
extern int amdgpu_dpm;
|
||||
|
||||
enum amd_pp_sensors {
|
||||
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
|
||||
AMDGPU_PP_SENSOR_VDDNB,
|
||||
|
@ -349,6 +351,7 @@ struct amd_powerplay_funcs {
|
|||
int (*get_fan_control_mode)(void *handle);
|
||||
int (*set_fan_speed_percent)(void *handle, uint32_t percent);
|
||||
int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
|
||||
int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
|
||||
int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
|
||||
int (*get_pp_table)(void *handle, char **table);
|
||||
int (*set_pp_table)(void *handle, const char *buf, size_t size);
|
||||
|
|
|
@ -38,8 +38,6 @@ struct pp_hwmgr;
|
|||
struct phm_fan_speed_info;
|
||||
struct pp_atomctrl_voltage_table;
|
||||
|
||||
extern int amdgpu_powercontainment;
|
||||
extern int amdgpu_sclk_deep_sleep_en;
|
||||
extern unsigned amdgpu_pp_feature_mask;
|
||||
|
||||
#define VOLTAGE_SCALE 4
|
||||
|
@ -85,7 +83,9 @@ enum PP_FEATURE_MASK {
|
|||
PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
|
||||
PP_VBI_TIME_SUPPORT_MASK = 0x80,
|
||||
PP_ULV_MASK = 0x100,
|
||||
PP_ENABLE_GFX_CG_THRU_SMU = 0x200
|
||||
PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
|
||||
PP_CLOCK_STRETCH_MASK = 0x400,
|
||||
PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800
|
||||
};
|
||||
|
||||
enum PHM_BackEnd_Magic {
|
||||
|
|
|
@ -396,7 +396,8 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
|
|||
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
|
||||
|
||||
/* Only start SMC if SMC RAM is not running */
|
||||
if (!smu7_is_smc_ram_running(smumgr)) {
|
||||
if (!(smu7_is_smc_ram_running(smumgr)
|
||||
|| cgs_is_virtualization_enabled(smumgr->device))) {
|
||||
fiji_avfs_event_mgr(smumgr, false);
|
||||
|
||||
/* Check if SMU is running in protected mode */
|
||||
|
@ -443,6 +444,9 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
|
|||
uint32_t efuse = 0;
|
||||
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
|
||||
|
||||
if (cgs_is_virtualization_enabled(smumgr->device))
|
||||
return 0;
|
||||
|
||||
if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
|
||||
mask, &efuse)) {
|
||||
if (efuse)
|
||||
|
|
|
@ -140,7 +140,8 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
|
|||
int result;
|
||||
|
||||
/* Only start SMC if SMC RAM is not running */
|
||||
if (!smu7_is_smc_ram_running(smumgr)) {
|
||||
if (!(smu7_is_smc_ram_running(smumgr) ||
|
||||
cgs_is_virtualization_enabled(smumgr->device))) {
|
||||
/*Check if SMU is running in protected mode*/
|
||||
if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
|
||||
SMU_FIRMWARE, SMU_MODE)) {
|
||||
|
|