From 7d99e5177477866fce3df146d4fe378248032230 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Oct 2012 17:02:17 -0400 Subject: [PATCH] drm/radeon/kms: fix up 6xx/7xx display watermark calc for dpm Calculate the low and high watermarks based on the low and high clocks for the current power state. The dynamic pm hw will select the appropriate watermark based on the internal dpm state. Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/rv515.c | 302 +++++++++++++++++++-------------- 1 file changed, 171 insertions(+), 131 deletions(-) diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 21c7d7b26e55..8ea1573ae820 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -937,13 +937,16 @@ struct rv515_watermark { }; static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, - struct radeon_crtc *crtc, - struct rv515_watermark *wm) + struct radeon_crtc *crtc, + struct rv515_watermark *wm, + bool low) { struct drm_display_mode *mode = &crtc->base.mode; fixed20_12 a, b, c; fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; + fixed20_12 sclk; + u32 selected_sclk; if (!crtc->base.enabled) { /* FIXME: wouldn't it better to set priority mark to maximum */ @@ -951,6 +954,18 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, return; } + /* rv6xx, rv7xx */ + if ((rdev->family >= CHIP_RV610) && + (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) + selected_sclk = radeon_dpm_get_sclk(rdev, low); + else + selected_sclk = rdev->pm.current_sclk; + + /* sclk in Mhz */ + a.full = dfixed_const(100); + sclk.full = dfixed_const(selected_sclk); + sclk.full = dfixed_div(sclk, a); + if (crtc->vsc.full > dfixed_const(2)) wm->num_line_pair.full = dfixed_const(2); else @@ -1016,7 +1031,7 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, * sclk = system clock(Mhz) */ a.full = dfixed_const(600 * 1000); - chunk_time.full = dfixed_div(a, rdev->pm.sclk); + chunk_time.full = dfixed_div(a, sclk); read_delay_latency.full = dfixed_const(1000); /* Determine the worst case latency @@ -1077,17 +1092,147 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, } } +static void rv515_compute_mode_priority(struct radeon_device *rdev, + struct rv515_watermark *wm0, + struct rv515_watermark *wm1, + struct drm_display_mode *mode0, + struct drm_display_mode *mode1, + u32 *d1mode_priority_a_cnt, + u32 *d2mode_priority_a_cnt) +{ + fixed20_12 priority_mark02, priority_mark12, fill_rate; + fixed20_12 a, b; + + *d1mode_priority_a_cnt = MODE_PRIORITY_OFF; + *d2mode_priority_a_cnt = MODE_PRIORITY_OFF; + + if (mode0 && mode1) { + if (dfixed_trunc(wm0->dbpp) > 64) + a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair); + else + a.full = wm0->num_line_pair.full; + if (dfixed_trunc(wm1->dbpp) > 64) + b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair); + else + b.full = wm1->num_line_pair.full; + a.full += b.full; + fill_rate.full = dfixed_div(wm0->sclk, a); + if (wm0->consumption_rate.full > fill_rate.full) { + b.full = wm0->consumption_rate.full - fill_rate.full; + b.full = dfixed_mul(b, wm0->active_time); + a.full = dfixed_const(16); + b.full = dfixed_div(b, a); + a.full = dfixed_mul(wm0->worst_case_latency, + wm0->consumption_rate); + priority_mark02.full = a.full + b.full; + } else { + a.full = dfixed_mul(wm0->worst_case_latency, + wm0->consumption_rate); + b.full = dfixed_const(16 * 1000); + 
priority_mark02.full = dfixed_div(a, b); + } + if (wm1->consumption_rate.full > fill_rate.full) { + b.full = wm1->consumption_rate.full - fill_rate.full; + b.full = dfixed_mul(b, wm1->active_time); + a.full = dfixed_const(16); + b.full = dfixed_div(b, a); + a.full = dfixed_mul(wm1->worst_case_latency, + wm1->consumption_rate); + priority_mark12.full = a.full + b.full; + } else { + a.full = dfixed_mul(wm1->worst_case_latency, + wm1->consumption_rate); + b.full = dfixed_const(16 * 1000); + priority_mark12.full = dfixed_div(a, b); + } + if (wm0->priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0->priority_mark.full; + if (dfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0->priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0->priority_mark_max.full; + if (wm1->priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1->priority_mark.full; + if (dfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1->priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1->priority_mark_max.full; + *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); + *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) { + *d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + *d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + } + } else if (mode0) { + if (dfixed_trunc(wm0->dbpp) > 64) + a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair); + else + a.full = wm0->num_line_pair.full; + fill_rate.full = dfixed_div(wm0->sclk, a); + if (wm0->consumption_rate.full > fill_rate.full) { + b.full = wm0->consumption_rate.full - fill_rate.full; + b.full = dfixed_mul(b, wm0->active_time); + a.full = dfixed_const(16); + b.full = dfixed_div(b, a); + a.full = dfixed_mul(wm0->worst_case_latency, + wm0->consumption_rate); + priority_mark02.full = a.full + b.full; + } else { + a.full = dfixed_mul(wm0->worst_case_latency, + wm0->consumption_rate); + b.full = dfixed_const(16); + priority_mark02.full = dfixed_div(a, b); + } + if (wm0->priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0->priority_mark.full; + if (dfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0->priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0->priority_mark_max.full; + *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); + if (rdev->disp_priority == 2) + *d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + } else if (mode1) { + if (dfixed_trunc(wm1->dbpp) > 64) + a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair); + else + a.full = wm1->num_line_pair.full; + fill_rate.full = dfixed_div(wm1->sclk, a); + if (wm1->consumption_rate.full > fill_rate.full) { + b.full = wm1->consumption_rate.full - fill_rate.full; + b.full = dfixed_mul(b, wm1->active_time); + a.full = dfixed_const(16); + b.full = dfixed_div(b, a); + a.full = dfixed_mul(wm1->worst_case_latency, + wm1->consumption_rate); + priority_mark12.full = a.full + b.full; + } else { + a.full = dfixed_mul(wm1->worst_case_latency, + wm1->consumption_rate); + b.full = dfixed_const(16 * 1000); + priority_mark12.full = dfixed_div(a, b); + } + if (wm1->priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1->priority_mark.full; + if (dfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1->priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1->priority_mark_max.full; + *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) + 
+		*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+	}
+}
+
 void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 {
 	struct drm_display_mode *mode0 = NULL;
 	struct drm_display_mode *mode1 = NULL;
-	struct rv515_watermark wm0;
-	struct rv515_watermark wm1;
+	struct rv515_watermark wm0_high, wm0_low;
+	struct rv515_watermark wm1_high, wm1_low;
 	u32 tmp;
-	u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
-	u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
-	fixed20_12 priority_mark02, priority_mark12, fill_rate;
-	fixed20_12 a, b;
+	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
+	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
 
 	if (rdev->mode_info.crtcs[0]->base.enabled)
 		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
@@ -1095,134 +1240,29 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
 	rs690_line_buffer_adjust(rdev, mode0, mode1);
 
-	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
-	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
 
-	tmp = wm0.lb_request_fifo_depth;
-	tmp |= wm1.lb_request_fifo_depth << 16;
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
+
+	tmp = wm0_high.lb_request_fifo_depth;
+	tmp |= wm1_high.lb_request_fifo_depth << 16;
 	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
 
-	if (mode0 && mode1) {
-		if (dfixed_trunc(wm0.dbpp) > 64)
-			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
-		else
-			a.full = wm0.num_line_pair.full;
-		if (dfixed_trunc(wm1.dbpp) > 64)
-			b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
-		else
-			b.full = wm1.num_line_pair.full;
-		a.full += b.full;
-		fill_rate.full = dfixed_div(wm0.sclk, a);
-		if (wm0.consumption_rate.full > fill_rate.full) {
-			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm0.active_time);
-			a.full = dfixed_const(16);
-			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm0.worst_case_latency,
-						wm0.consumption_rate);
-			priority_mark02.full = a.full + b.full;
-		} else {
-			a.full = dfixed_mul(wm0.worst_case_latency,
-						wm0.consumption_rate);
-			b.full = dfixed_const(16 * 1000);
-			priority_mark02.full = dfixed_div(a, b);
-		}
-		if (wm1.consumption_rate.full > fill_rate.full) {
-			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm1.active_time);
-			a.full = dfixed_const(16);
-			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm1.worst_case_latency,
-						wm1.consumption_rate);
-			priority_mark12.full = a.full + b.full;
-		} else {
-			a.full = dfixed_mul(wm1.worst_case_latency,
-						wm1.consumption_rate);
-			b.full = dfixed_const(16 * 1000);
-			priority_mark12.full = dfixed_div(a, b);
-		}
-		if (wm0.priority_mark.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark.full;
-		if (dfixed_trunc(priority_mark02) < 0)
-			priority_mark02.full = 0;
-		if (wm0.priority_mark_max.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark_max.full;
-		if (wm1.priority_mark.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark.full;
-		if (dfixed_trunc(priority_mark12) < 0)
-			priority_mark12.full = 0;
-		if (wm1.priority_mark_max.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark_max.full;
-		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
-		d2mode_priority_a_cnt = 
dfixed_trunc(priority_mark12); - if (rdev->disp_priority == 2) { - d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - } - } else if (mode0) { - if (dfixed_trunc(wm0.dbpp) > 64) - a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); - else - a.full = wm0.num_line_pair.full; - fill_rate.full = dfixed_div(wm0.sclk, a); - if (wm0.consumption_rate.full > fill_rate.full) { - b.full = wm0.consumption_rate.full - fill_rate.full; - b.full = dfixed_mul(b, wm0.active_time); - a.full = dfixed_const(16); - b.full = dfixed_div(b, a); - a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); - priority_mark02.full = a.full + b.full; - } else { - a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); - b.full = dfixed_const(16); - priority_mark02.full = dfixed_div(a, b); - } - if (wm0.priority_mark.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark.full; - if (dfixed_trunc(priority_mark02) < 0) - priority_mark02.full = 0; - if (wm0.priority_mark_max.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark_max.full; - d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); - if (rdev->disp_priority == 2) - d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - } else if (mode1) { - if (dfixed_trunc(wm1.dbpp) > 64) - a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); - else - a.full = wm1.num_line_pair.full; - fill_rate.full = dfixed_div(wm1.sclk, a); - if (wm1.consumption_rate.full > fill_rate.full) { - b.full = wm1.consumption_rate.full - fill_rate.full; - b.full = dfixed_mul(b, wm1.active_time); - a.full = dfixed_const(16); - b.full = dfixed_div(b, a); - a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); - priority_mark12.full = a.full + b.full; - } else { - a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); - b.full = dfixed_const(16 * 1000); - priority_mark12.full = dfixed_div(a, b); - } - if (wm1.priority_mark.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark.full; - if (dfixed_trunc(priority_mark12) < 0) - priority_mark12.full = 0; - if (wm1.priority_mark_max.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark_max.full; - d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); - if (rdev->disp_priority == 2) - d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - } + rv515_compute_mode_priority(rdev, + &wm0_high, &wm1_high, + mode0, mode1, + &d1mode_priority_a_cnt, &d2mode_priority_a_cnt); + rv515_compute_mode_priority(rdev, + &wm0_low, &wm1_low, + mode0, mode1, + &d1mode_priority_b_cnt, &d2mode_priority_b_cnt); WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); + WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt); WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt); } void rv515_bandwidth_update(struct radeon_device *rdev)
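
Note on the new sclk handling above: the added hunk converts the selected
engine clock to MHz in 20.12 fixed point (a.full = dfixed_const(100);
sclk.full = dfixed_div(sclk, a)) before feeding it to the existing
chunk_time math. Below is a minimal, self-contained sketch of that
conversion for reference only. The fx20_12/fx_const/fx_div names are
illustrative stand-ins for the fixed20_12/dfixed_*() helpers from
drm_fixed.h, and the sketch assumes the clock is reported in 10 kHz units,
as radeon clock values generally are; it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fx20_12;	/* 20.12 fixed point */

static fx20_12 fx_const(uint32_t a)		/* integer -> 20.12 */
{
	return (fx20_12){ .full = a << 12 };
}

static fx20_12 fx_div(fx20_12 a, fx20_12 b)	/* 20.12 division */
{
	uint64_t t = ((uint64_t)a.full << 12) / b.full;

	return (fx20_12){ .full = (uint32_t)t };
}

int main(void)
{
	uint32_t selected_sclk = 60000;		/* 10 kHz units = 600 MHz */
	fx20_12 hundred = fx_const(100);
	fx20_12 sclk = fx_div(fx_const(selected_sclk), hundred);

	/* prints 600, the MHz value the watermark math consumes */
	printf("sclk = %u MHz\n", sclk.full >> 12);
	return 0;
}

Doing the divide with dfixed_const(100) rather than plain integer math
keeps sclk in the same 20.12 representation that the rest of
rv515_crtc_bandwidth_compute() already uses for chunk_time and the
consumption/fill rate calculations.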