drm/msm/disp: Move various debug logs to atomic bucket

These prints flood the logs when drm debugging is set to enable KMS and
driver logging (DRM_UT_KMS and DRM_UT_DRIVER). Let's move these prints
to the atomic bucket (DRM_UT_ATOMIC), as they relate to the atomic
paths.
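
For context, a minimal sketch of how these categories are selected at runtime, assuming the standard include/drm/drm_print.h helpers (the function below is hypothetical and not part of this patch): DRM_DEBUG_KMS() and DRM_DEBUG_DRIVER() print under the DRM_UT_KMS (0x04) and DRM_UT_DRIVER (0x02) bits of the drm.debug module parameter, while DRM_DEBUG_ATOMIC() prints under DRM_UT_ATOMIC (0x10), so the atomic-path chatter can be enabled on its own with e.g. drm.debug=0x10.

#include <drm/drm_crtc.h>
#include <drm/drm_print.h>

/* Hypothetical example, not from this series. */
static void example_atomic_debug(struct drm_crtc *crtc)
{
	/* Emitted only when drm.debug has the DRM_UT_ATOMIC bit (0x10) set. */
	DRM_DEBUG_ATOMIC("crtc%d entering atomic path\n", crtc->base.id);

	/* Guard anything expensive that only feeds debug output. */
	if (drm_debug_enabled(DRM_UT_ATOMIC))
		DRM_DEBUG_ATOMIC("crtc%d verbose state dump would go here\n",
				 crtc->base.id);
}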

Cc: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Cc: Abhinav Kumar <abhinavk@codeaurora.org>
Cc: Kuogee Hsieh <khsieh@codeaurora.org>
Cc: aravindh@codeaurora.org
Cc: Sean Paul <sean@poorly.run>
Signed-off-by: Stephen Boyd <swboyd@chromium.org>
Link: https://lore.kernel.org/r/20210430193104.1770538-7-swboyd@chromium.org
Reviewed-by: Abhinav Kumar <abhinavk@codeaurora.org>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Author: Stephen Boyd, 2021-04-30 12:31:04 -07:00 (committed by Rob Clark)
parent f6bc4e1d51
commit 5b702d787b
6 changed files with 45 additions and 48 deletions

drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c

@@ -132,7 +132,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
}
-DPU_DEBUG(
+DRM_DEBUG_ATOMIC(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
perf->max_per_pipe_ib, perf->bw_ctl);
@@ -178,7 +178,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct dpu_crtc_state *tmp_cstate =
to_dpu_crtc_state(tmp_crtc->state);
-DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
tmp_cstate->bw_control);
@@ -187,11 +187,11 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* convert bandwidth to kb */
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
-DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
threshold = kms->catalog->perf.max_bw_high;
-DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
if (!threshold) {
DPU_ERROR("no bandwidth limits specified\n");
@@ -228,7 +228,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
-DPU_DEBUG("crtc=%d bw=%llu paths:%d\n",
+DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
tmp_crtc->base.id,
dpu_cstate->new_perf.bw_ctl, kms->num_paths);
}
@@ -278,7 +278,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
-DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+DRM_DEBUG_ATOMIC("Release BW crtc=%d\n", crtc->base.id);
dpu_crtc->cur_perf.bw_ctl = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc);
}
@@ -314,7 +314,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
clk_rate = kms->perf.fix_core_clk_rate;
-DPU_DEBUG("clk:%llu\n", clk_rate);
+DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate);
return clk_rate;
}
@@ -344,7 +344,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
-DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+DRM_DEBUG_ATOMIC("crtc:%d stop_req:%d core_clk:%llu\n",
crtc->base.id, stop_req, kms->perf.core_clk_rate);
old = &dpu_crtc->cur_perf;
@@ -362,7 +362,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
(new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
(!params_changed && ((new->bw_ctl < old->bw_ctl) ||
(new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
-DPU_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+DRM_DEBUG_ATOMIC("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
crtc->base.id, params_changed,
new->bw_ctl, old->bw_ctl);
old->bw_ctl = new->bw_ctl;
@@ -378,7 +378,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
update_clk = true;
}
} else {
-DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+DRM_DEBUG_ATOMIC("crtc=%d disable\n", crtc->base.id);
memset(old, 0, sizeof(*old));
update_bus = true;
update_clk = true;
@@ -413,7 +413,7 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
}
kms->perf.core_clk_rate = clk_rate;
-DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+DRM_DEBUG_ATOMIC("update clk rate = %lld HZ\n", clk_rate);
}
return 0;
}

drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c

@@ -57,8 +57,6 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-DPU_DEBUG("\n");
if (!crtc)
return;
@@ -163,7 +161,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
lm->ops.setup_blend_config(lm, pstate->stage,
0xFF, 0, blend_op);
-DPU_DEBUG("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
+DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
&format->base.pixel_format, format->alpha_enable, blend_op);
}
@@ -220,7 +218,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
set_bit(dpu_plane_pipe(plane), fetch_active);
-DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
plane->base.id,
@@ -278,7 +277,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_hw_mixer *lm;
int i;
-DPU_DEBUG("%s\n", dpu_crtc->name);
+DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
for (i = 0; i < cstate->num_mixers; i++) {
mixer[i].mixer_op_mode = 0;
@@ -305,7 +304,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
/* stage config flush mask */
ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
-DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
ctl->idx - CTL_0,
@@ -388,7 +387,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
DPU_ATRACE_BEGIN("crtc_frame_event");
-DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
ktime_to_ns(fevent->ts));
if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
@@ -558,7 +557,7 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
/* stage config flush mask */
ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
-DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
+DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
mixer[i].hw_lm->idx - DSPP_0,
ctl->idx - CTL_0,
mixer[i].flush_mask);
@@ -572,12 +571,12 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_encoder *encoder;
if (!crtc->state->enable) {
-DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
crtc->base.id, crtc->state->enable);
return;
}
-DPU_DEBUG("crtc%d\n", crtc->base.id);
+DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
@@ -617,12 +616,12 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct dpu_crtc_state *cstate;
if (!crtc->state->enable) {
-DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
crtc->base.id, crtc->state->enable);
return;
}
-DPU_DEBUG("crtc%d\n", crtc->base.id);
+DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
@@ -675,7 +674,7 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
-DPU_DEBUG("crtc%d\n", crtc->base.id);
+DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
__drm_atomic_helper_crtc_destroy_state(state);
@@ -688,7 +687,7 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
int ret, rc = 0;
if (!atomic_read(&dpu_crtc->frame_pending)) {
-DPU_DEBUG("no frames pending\n");
+DRM_DEBUG_ATOMIC("no frames pending\n");
return 0;
}
@@ -731,9 +730,9 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
-DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
} else
-DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
dpu_crtc->play_count++;
@@ -908,7 +907,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
if (!crtc_state->enable || !crtc_state->active) {
-DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, crtc_state->enable,
crtc_state->active);
memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
@@ -916,7 +915,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
mode = &crtc_state->adjusted_mode;
-DPU_DEBUG("%s: check\n", dpu_crtc->name);
+DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
/* force a full mode set if active state changed */
if (crtc_state->active_changed)
@@ -1024,7 +1023,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
-DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
+DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
}
for (i = 0; i < multirect_count; i++) {
@@ -1376,6 +1375,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
/* initialize event handling */
spin_lock_init(&dpu_crtc->event_lock);
-DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
return crtc;
}

drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c

@@ -28,13 +28,13 @@
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"
-#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
-#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+#define DPU_DEBUG_PHYS(p, fmt, ...) DRM_DEBUG_ATOMIC("enc%d intf%d pp%d " fmt,\
(p) ? (p)->parent->base.id : -1, \
(p) ? (p)->intf_idx - INTF_0 : -1, \
(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
@@ -791,13 +791,13 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
/* return if the resource control is already in ON state */
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
-DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
DRMID(drm_enc), sw_event);
mutex_unlock(&dpu_enc->rc_lock);
return 0;
} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
-DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
DRMID(drm_enc), sw_event,
dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock);
@@ -2048,8 +2048,6 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
phys_params.parent_ops = &dpu_encoder_parent_ops;
phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
-DPU_DEBUG("\n");
switch (disp_info->intf_type) {
case DRM_MODE_ENCODER_DSI:
intf_type = INTF_DSI;

drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c

@@ -992,7 +992,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
* Currently only support exactly zero or one modifier.
* All planes use the same modifier.
*/
-DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier);
switch (modifier) {
case 0:
@@ -1002,7 +1002,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
case DRM_FORMAT_MOD_QCOM_COMPRESSED:
map = dpu_format_map_ubwc;
map_size = ARRAY_SIZE(dpu_format_map_ubwc);
-DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+DRM_DEBUG_ATOMIC("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
(char *)&format);
break;
default:
@@ -1021,7 +1021,7 @@ const struct dpu_format *dpu_get_dpu_format_ext(
DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
(char *)&format, modifier);
else
-DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
(char *)&format, modifier,
DPU_FORMAT_IS_UBWC(fmt),
DPU_FORMAT_IS_YUV(fmt));

drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c

@@ -25,7 +25,7 @@
#include "dpu_vbif.h"
#include "dpu_plane.h"
-#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DRM_DEBUG_ATOMIC("plane%d " fmt,\
(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\

drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c

@@ -46,7 +46,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
vbif->idx - VBIF_0, xin_id);
} else {
rc = 0;
-DPU_DEBUG("VBIF %d client %d is halted\n",
+DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
vbif->idx - VBIF_0, xin_id);
}
@@ -87,7 +87,7 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
}
}
-DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
vbif->idx - VBIF_0, params->xin_id,
params->width, params->height, params->frame_rate,
pps, *ot_lim);
@@ -133,7 +133,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
}
exit:
-DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
vbif->idx - VBIF_0, params->xin_id, ot_lim);
return ot_lim;
}
@@ -163,7 +163,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
}
if (!vbif || !mdp) {
-DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
return;
}
@@ -230,7 +230,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
-DPU_DEBUG("qos remap not supported\n");
+DRM_DEBUG_ATOMIC("qos remap not supported\n");
return;
}
@@ -238,14 +238,14 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
&vbif->cap->qos_nrt_tbl;
if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
-DPU_DEBUG("qos tbl not defined\n");
+DRM_DEBUG_ATOMIC("qos tbl not defined\n");
return;
}
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
-DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
params->vbif_idx, params->xin_id, i,
qos_tbl->priority_lvl[i]);
vbif->ops.set_qos_remap(vbif, params->xin_id, i,