Compare commits

...

8 Commits

Author SHA1 Message Date
ktx-vaidehi 7f067d524d fix: change key 2024-09-25 18:37:22 +05:30
ktx-vaidehi 5a1f455c28 feat: UI in config for time shift 2024-09-25 18:33:23 +05:30
ktx-abhay 90648d8773 add: shift timerange based on gap 2024-09-25 15:07:53 +05:30
ktx-abhay 73d347b5b4 add: multi query support 2024-09-25 15:07:53 +05:30
Ashish Kolhe e20c509bf4
feat: multi search with multiple time ranges (#4626)
<!-- This is an auto-generated comment: release notes by coderabbit.ai -->
## Summary by CodeRabbit

- **New Features**
  - Introduced a new `SqlQuery` structure for improved SQL query handling.
  - Added a `per_query_response` feature to enhance query response flexibility.

- **Bug Fixes**
  - Improved error handling and response clarity in search functions.
  - Enhanced request formatting for better readability and consistency.

- **Refactor**
  - Restructured request and response handling in the `search_multi` and `_search_partition_multi` functions for improved code clarity.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->
2024-09-25 15:02:30 +05:30
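
A minimal sketch of the backward-compatible `sql` field this commit introduces, assuming only `serde`/`serde_json` and the trimmed field set visible in the diff below: an untagged enum lets the same JSON array accept both the old plain-string entries and the new `SqlQuery` objects.

```rust
use serde::Deserialize;

// Trimmed mirror of the SqlQuery shape from the diff.
#[derive(Debug, Deserialize)]
struct SqlQuery {
    sql: String,
    #[serde(default)]
    start_time: Option<i64>,
    #[serde(default)]
    end_time: Option<i64>,
}

// serde tries the variants in order, so a bare string becomes
// OldFormat and an object becomes NewFormat.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum SqlOrSqlQuery {
    OldFormat(String),
    NewFormat(SqlQuery),
}

fn main() {
    let body = r#"[
        "select * from k8s",
        {"sql": "select * from k8s", "start_time": 1, "end_time": 2}
    ]"#;
    let parsed: Vec<SqlOrSqlQuery> = serde_json::from_str(body).unwrap();
    println!("{parsed:?}"); // [OldFormat(...), NewFormat(...)]
}
```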
Ashish Kolhe 4fffa6d8a0
fix: add request timeout for tonic requests (#4633)
<!-- This is an auto-generated comment: release notes by coderabbit.ai -->
## Summary by CodeRabbit

- **New Features**
  - Introduced timeout settings for gRPC requests across multiple functions, enhancing request handling and response management.

- **Bug Fixes**
  - Improved the stability of gRPC requests by ensuring they respect the configured timeout values, potentially reducing failed requests due to timeouts.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->
2024-09-25 13:57:16 +05:30
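
Every call site in this commit follows the same two-line pattern; a sketch with a hypothetical `timed_request` helper, assuming a tonic client and a timeout value in seconds (the real code reads it from `cfg.limit.query_timeout`):

```rust
use std::time::Duration;

// Wraps a payload in a tonic Request with a deadline attached.
// set_timeout is transmitted as the standard `grpc-timeout` metadata
// header, so the server side can enforce the same deadline.
fn timed_request<T>(payload: T, query_timeout_secs: u64) -> tonic::Request<T> {
    let mut request = tonic::Request::new(payload);
    request.set_timeout(Duration::from_secs(query_timeout_secs));
    request
}
```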
Subhra264 34f4a3c7cc
fix: use max of silence and frequency for alert next run and support all threshold symbols (#4627)
Addresses #4623 

- [x] When the alert conditions are satisfied, use the max of frequency and silence period to calculate the next trigger time.
- [x] Support all the alert threshold symbols.

<!-- This is an auto-generated comment: release notes by coderabbit.ai -->
## Summary by CodeRabbit

- **New Features**
  - Enhanced alert scheduling logic to improve the timing of alert triggers based on silence periods and alert frequency.
  - Introduced refined conditional checks for processing query results, improving clarity and maintainability.

- **Bug Fixes**
  - Resolved issues with alert timing to ensure alerts run as expected even during silence periods.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->
2024-09-25 11:18:07 +05:30
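
The scheduling fix reduces to a unit normalization plus a `max`; a sketch with a hypothetical helper, assuming silence is stored in minutes and frequency in seconds, as the diff's comment states:

```rust
use std::cmp::max;

// Next-run delay in seconds: normalize silence (minutes) to seconds,
// then take the larger of silence and frequency, so a short silence
// period can no longer make the alert fire ahead of its frequency.
fn next_run_delay_secs(silence_minutes: i64, frequency_seconds: i64) -> i64 {
    max(silence_minutes * 60, frequency_seconds)
}

fn main() {
    // Frequency 60 min, silence 10 min: the next run now waits the
    // full 60 min instead of only the 10-min silence period.
    assert_eq!(next_run_delay_secs(10, 3600), 3600);
}
```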
Omkar Kesarkhane 58dc29408e
fix: Updated dashboard panel height row count logic (#4639)
#4630 

<!-- This is an auto-generated comment: release notes by coderabbit.ai -->
## Summary by CodeRabbit

- **Bug Fixes**
  - Improved the calculation of the row count in the dashboard panel layout settings for better accuracy.

- **Refactor**
  - Simplified the logic for determining the row count by adjusting the height calculations.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->
2024-09-25 10:59:54 +05:30
19 changed files with 1218 additions and 288 deletions

View File

@@ -289,6 +289,15 @@ pub struct QueryConfig {
min: Option<f64>,
#[serde(skip_serializing_if = "Option::is_none")]
max: Option<f64>,
#[serde(skip_serializing_if = "Option::is_none")]
time_shift: Option<Vec<TimeShift>>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct TimeShift {
#[serde(skip_serializing_if = "Option::is_none")]
off_set: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
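
With `rename_all = "camelCase"`, the `off_set` field serializes as `offSet`, which matches the key the new frontend picker emits. A hedged sketch of turning such an offset string into the microsecond shift applied to a query window (the parser here is illustrative, not the project's actual helper):

```rust
// Illustrative: converts a relative offset such as "15m" or "1h" into
// microseconds, the unit start_time/end_time are expressed in.
// (Month offsets are variable-length and intentionally unhandled here.)
fn offset_to_micros(off_set: &str) -> Option<i64> {
    let (num, unit) = off_set.split_at(off_set.len().checked_sub(1)?);
    let n: i64 = num.parse().ok()?;
    let secs = match unit {
        "m" => n * 60,
        "h" => n * 3_600,
        "d" => n * 86_400,
        "w" => n * 604_800,
        _ => return None,
    };
    Some(secs * 1_000_000)
}

fn main() {
    // A "1h" shift moves both ends of the window back one hour.
    assert_eq!(offset_to_micros("1h"), Some(3_600_000_000));
}
```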

View File

@@ -16,7 +16,7 @@
use std::str::FromStr;
use proto::cluster_rpc;
use serde::{Deserialize, Serialize};
use serde::{Deserialize, Deserializer, Serialize};
use utoipa::ToSchema;
use crate::{
@@ -739,10 +739,24 @@ pub struct MultiSearchPartitionResponse {
pub error: hashbrown::HashMap<String, String>,
}
#[derive(Clone, Debug, Serialize, Deserialize, ToSchema)]
pub struct SqlQuery {
pub sql: String,
#[serde(default)]
pub start_time: Option<i64>,
#[serde(default)]
pub end_time: Option<i64>,
#[serde(default)]
pub query_fn: Option<String>,
#[serde(default)]
pub is_old_format: bool,
}
#[derive(Clone, Debug, Serialize, Deserialize, ToSchema)]
#[schema(as = SearchRequest)]
pub struct MultiStreamRequest {
pub sql: Vec<String>,
#[serde(default, deserialize_with = "deserialize_sql")]
pub sql: Vec<SqlQuery>, // Use the new struct for SQL queries
#[serde(default)]
pub encoding: RequestEncoding,
#[serde(default)]
@@ -774,25 +788,63 @@ pub struct MultiStreamRequest {
pub search_type: Option<SearchEventType>,
#[serde(default)]
pub index_type: String, // parquet(default) or fst
#[serde(default)]
pub per_query_response: bool,
}
fn deserialize_sql<'de, D>(deserializer: D) -> Result<Vec<SqlQuery>, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(untagged)]
enum SqlOrSqlQuery {
OldFormat(String),
NewFormat(SqlQuery),
}
let v: Vec<SqlOrSqlQuery> = Vec::deserialize(deserializer)?;
// Convert old format into the new format
let result: Vec<SqlQuery> = v
.into_iter()
.map(|item| match item {
SqlOrSqlQuery::OldFormat(sql) => SqlQuery {
sql,
start_time: None,
end_time: None,
query_fn: None,
is_old_format: true,
},
SqlOrSqlQuery::NewFormat(query) => query,
})
.collect();
Ok(result)
}
impl MultiStreamRequest {
pub fn to_query_req(&mut self) -> Vec<Request> {
let mut res = vec![];
for query in &self.sql {
let query_fn = if query.is_old_format {
self.query_fn.clone()
} else {
query.query_fn.clone()
};
res.push(Request {
query: Query {
sql: query.to_string(),
sql: query.sql.clone(),
from: self.from,
size: self.size,
start_time: self.start_time,
end_time: self.end_time,
start_time: query.start_time.unwrap_or(self.start_time),
end_time: query.end_time.unwrap_or(self.end_time),
sort_by: self.sort_by.clone(),
quick_mode: self.quick_mode,
query_type: self.query_type.clone(),
track_total_hits: self.track_total_hits,
uses_zo_fn: self.uses_zo_fn,
query_fn: self.query_fn.clone(),
query_fn,
skip_wal: self.skip_wal,
},
regions: self.regions.clone(),
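
The fallback rule above is plain `Option::unwrap_or`: per-query values win when present, while old-format entries (which deserialize with `None`) inherit the request-level window and `query_fn`. A minimal sketch of just that rule:

```rust
// Per-query start/end override the request-level window; old-format
// queries carry None and fall back to the request values.
fn effective_window(
    query: (Option<i64>, Option<i64>),
    request: (i64, i64),
) -> (i64, i64) {
    (query.0.unwrap_or(request.0), query.1.unwrap_or(request.1))
}

fn main() {
    assert_eq!(effective_window((Some(10), Some(20)), (1, 2)), (10, 20));
    assert_eq!(effective_window((None, None), (1, 2)), (1, 2));
}
```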

View File

@@ -88,7 +88,7 @@ pub fn check_auth(req: Request<()>) -> Result<Request<()>, Status> {
#[cfg(test)]
mod tests {
use config::cache_instance_id;
use config::{cache_instance_id, get_config};
use super::*;
use crate::common::meta::user::User;
@@ -114,6 +114,9 @@ mod tests {
);
let mut request = tonic::Request::new(());
request.set_timeout(std::time::Duration::from_secs(
get_config().limit.query_timeout,
));
let token: MetadataValue<_> = "basic cm9vdEBleGFtcGxlLmNvbTp0b2tlbg==".parse().unwrap();
let meta: &mut tonic::metadata::MetadataMap = request.metadata_mut();
@@ -144,6 +147,9 @@
);
let mut request = tonic::Request::new(());
request.set_timeout(std::time::Duration::from_secs(
get_config().limit.query_timeout,
));
let token: MetadataValue<_> = "instance".parse().unwrap();
let meta: &mut tonic::metadata::MetadataMap = request.metadata_mut();
meta.insert("authorization", token.clone());
@@ -171,6 +177,9 @@
},
);
let mut request = tonic::Request::new(());
request.set_timeout(std::time::Duration::from_secs(
get_config().limit.query_timeout,
));
let token: MetadataValue<_> = "basic cm9vdEBleGFtcGxlLmNvbTp0b2tlbjg4OA=="
.parse()

View File

@@ -49,13 +49,12 @@ use crate::{
context_path = "/api",
tag = "Search",
operation_id = "SearchSQL",
security(
("Authorization"= [])
),
params(
("org_id" = String, Path, description = "Organization name"),
),
request_body(content = SearchRequest, description = "Search query", content_type = "application/json", example = json!({
params(("org_id" = String, Path, description = "Organization name")),
request_body(
content = SearchRequest,
description = "Search query",
content_type = "application/json",
example = json!({
"query": {
"sql": "select * from k8s ",
"start_time": 1675182660872049i64,
@@ -63,9 +62,15 @@ use crate::{
"from": 0,
"size": 10
}
})),
})
),
responses(
(status = 200, description = "Success", content_type = "application/json", body = SearchResponse, example = json!({
(
status = 200,
description = "Success",
content_type = "application/json",
body = SearchResponse,
example = json!({
"took": 155,
"hits": [
{
@@ -89,9 +94,20 @@ use crate::{
"from": 0,
"size": 1,
"scan_size": 28943
})),
(status = 400, description = "Failure", content_type = "application/json", body = HttpResponse),
(status = 500, description = "Failure", content_type = "application/json", body = HttpResponse),
}),
),
(
status = 400,
description = "Failure",
content_type = "application/json",
body = HttpResponse,
),
(
status = 500,
description = "Failure",
content_type = "application/json",
body = HttpResponse,
)
)
)]
#[post("/{org_id}/_search_multi")]
@@ -115,18 +131,24 @@ pub async fn search_multi(
let query = web::Query::<HashMap<String, String>>::from_query(in_req.query_string()).unwrap();
let stream_type = match get_stream_type_from_request(&query) {
Ok(v) => v.unwrap_or(StreamType::Logs),
Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
Err(e) => {
return Ok(MetaHttpResponse::bad_request(e));
}
};
let search_type = match get_search_type_from_request(&query) {
Ok(v) => v,
Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
Err(e) => {
return Ok(MetaHttpResponse::bad_request(e));
}
};
// handle encoding for query and aggs
let mut multi_req: search::MultiStreamRequest = match json::from_slice(&body) {
Ok(v) => v,
Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
Err(e) => {
return Ok(MetaHttpResponse::bad_request(e));
}
};
let mut query_fn = multi_req
@@ -144,6 +166,8 @@ pub async fn search_multi(
let mut queries = multi_req.to_query_req();
let mut multi_res = search::Response::new(multi_req.from, multi_req.size);
let per_query_resp = multi_req.per_query_response;
// Before making any rpc requests, first check the sql expressions can be decoded correctly
for req in queries.iter_mut() {
if let Err(e) = req.decode() {
@@ -311,7 +335,7 @@ pub async fn search_multi(
multi_res.took += res.took;
if res.total > multi_res.total {
multi_res.total = res.total
multi_res.total = res.total;
}
multi_res.from = res.from;
multi_res.size += res.size;
@@ -319,10 +343,15 @@ pub async fn search_multi(
multi_res.scan_size += res.scan_size;
multi_res.scan_records += res.scan_records;
multi_res.columns.extend(res.columns);
multi_res.hits.extend(res.hits);
multi_res.response_type = res.response_type;
multi_res.trace_id = res.trace_id;
multi_res.cached_ratio = res.cached_ratio;
if per_query_resp {
multi_res.hits.push(serde_json::Value::Array(res.hits));
} else {
multi_res.hits.extend(res.hits);
}
}
Err(err) => {
let time = start.elapsed().as_secs_f64();
@@ -379,19 +408,24 @@ pub async fn search_multi(
context_path = "/api",
tag = "Search",
operation_id = "SearchPartitionMulti",
security(
("Authorization"= [])
),
params(
("org_id" = String, Path, description = "Organization name"),
),
request_body(content = SearchRequest, description = "Search query", content_type = "application/json", example = json!({
params(("org_id" = String, Path, description = "Organization name")),
request_body(
content = SearchRequest,
description = "Search query",
content_type = "application/json",
example = json!({
"sql": "select * from k8s ",
"start_time": 1675182660872049i64,
"end_time": 1675185660872049i64
})),
})
),
responses(
(status = 200, description = "Success", content_type = "application/json", body = SearchResponse, example = json!({
(
status = 200,
description = "Success",
content_type = "application/json",
body = SearchResponse,
example = json!({
"took": 155,
"file_num": 10,
"original_size": 10240,
@@ -400,9 +434,20 @@ pub async fn search_multi(
[1674213225158000i64, 1674213225158000i64],
[1674213225158000i64, 1674213225158000i64],
]
})),
(status = 400, description = "Failure", content_type = "application/json", body = HttpResponse),
(status = 500, description = "Failure", content_type = "application/json", body = HttpResponse),
}),
),
(
status = 400,
description = "Failure",
content_type = "application/json",
body = HttpResponse,
),
(
status = 500,
description = "Failure",
content_type = "application/json",
body = HttpResponse,
)
)
)]
#[post("/{org_id}/_search_partition_multi")]
@@ -428,12 +473,16 @@ pub async fn _search_partition_multi(
let query = web::Query::<HashMap<String, String>>::from_query(in_req.query_string()).unwrap();
let stream_type = match get_stream_type_from_request(&query) {
Ok(v) => v.unwrap_or(StreamType::Logs),
Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
Err(e) => {
return Ok(MetaHttpResponse::bad_request(e));
}
};
let req: search::MultiSearchPartitionRequest = match json::from_slice(&body) {
Ok(v) => v,
Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
Err(e) => {
return Ok(MetaHttpResponse::bad_request(e));
}
};
let search_fut = SearchService::search_partition_multi(&trace_id, &org_id, stream_type, &req);
@@ -570,12 +619,16 @@ pub async fn around_multi(
let query = web::Query::<HashMap<String, String>>::from_query(in_req.query_string()).unwrap();
let stream_type = match get_stream_type_from_request(&query) {
Ok(v) => v.unwrap_or(StreamType::Logs),
Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
Err(e) => {
return Ok(MetaHttpResponse::bad_request(e));
}
};
let around_key = match query.get("key") {
Some(v) => v.parse::<i64>().unwrap_or(0),
None => return Ok(MetaHttpResponse::bad_request("around key is empty")),
None => {
return Ok(MetaHttpResponse::bad_request("around key is empty"));
}
};
let mut query_fn = query
.get("query_fn")

View File

@@ -242,18 +242,50 @@ impl QueryCondition {
}
}
};
if self.search_event_type.is_none() && resp.total < trigger_condition.threshold as usize {
let records: Option<Vec<Map<String, Value>>> = Some(
resp.hits
.iter()
.map(|hit| hit.as_object().unwrap().clone())
.collect(),
);
if self.search_event_type.is_none() {
let threshold = trigger_condition.threshold as usize;
match trigger_condition.operator {
Operator::EqualTo => {
if records.as_ref().unwrap().len() == threshold {
return Ok((records, now));
}
}
Operator::NotEqualTo => {
if records.as_ref().unwrap().len() != threshold {
return Ok((records, now));
}
}
Operator::GreaterThan => {
if records.as_ref().unwrap().len() > threshold {
return Ok((records, now));
}
}
Operator::GreaterThanEquals => {
if records.as_ref().unwrap().len() >= threshold {
return Ok((records, now));
}
}
Operator::LessThan => {
if records.as_ref().unwrap().len() < threshold {
return Ok((records, now));
}
}
Operator::LessThanEquals => {
if records.as_ref().unwrap().len() <= threshold {
return Ok((records, now));
}
}
_ => {}
}
Ok((None, now))
} else {
Ok((
Some(
resp.hits
.iter()
.map(|hit| hit.as_object().unwrap().clone())
.collect(),
),
now,
))
Ok((records, now))
}
}
}

View File

@@ -235,7 +235,17 @@ async fn handle_alert_triggers(trigger: db::scheduler::Trigger) -> Result<(), an
// Check for the cron timestamp after the silence period
new_trigger.next_run_at = schedule.after(&silence).next().unwrap().timestamp_micros();
} else {
new_trigger.next_run_at += Duration::try_minutes(alert.trigger_condition.silence)
// When the silence period is less than the frequency, the alert runs after the silence
// period completely ignoring the frequency. So, if frequency is 60 mins and
// silence is 10 mins, the condition is satisfied, in that case, the alert
// will run after 10 mins of silence period. To avoid this scenario, we
// should use the max of (frequency, silence) as the next_run_at.
// Silence period is in minutes, and the frequency is in seconds.
let next_run_in_seconds = std::cmp::max(
alert.trigger_condition.silence * 60,
alert.trigger_condition.frequency,
);
new_trigger.next_run_at += Duration::try_seconds(next_run_in_seconds)
.unwrap()
.num_microseconds()
.unwrap();

View File

@@ -202,7 +202,8 @@ async fn send_to_node(
);
break;
}
let request = tonic::Request::new(req_query.clone());
let mut request = tonic::Request::new(req_query.clone());
request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
match client.send_file_list(request).await {
Ok(_) => break,
Err(e) => {

View File

@@ -241,7 +241,7 @@ async fn get_file_list(
.parse()
.map_err(|_| DataFusionError::Execution("invalid org_id".to_string()))?;
let mut request = tonic::Request::new(req);
// request.set_timeout(Duration::from_secs(cfg.grpc.timeout));
request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
opentelemetry::global::get_text_map_propagator(|propagator| {
propagator.inject_context(

View File

@@ -160,7 +160,7 @@ async fn search_in_cluster(
.parse()
.map_err(|_| Error::Message(format!("invalid org_id: {}", req.org_id)))?;
let mut request = tonic::Request::new(req);
// request.set_timeout(Duration::from_secs(cfg.grpc.timeout));
request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
opentelemetry::global::get_text_map_propagator(|propagator| {
propagator.inject_context(

View File

@@ -86,8 +86,8 @@ pub async fn get_cached_results(
is_descending:cache_req.is_descending,
};
let request = tonic::Request::new(req);
let mut request = tonic::Request::new(req);
request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
log::info!(
"[trace_id {trace_id}] get_cached_results->grpc: request node: {}",
&node_addr

View File

@@ -85,7 +85,8 @@ pub async fn get_cached_results(
is_descending:cache_req.is_descending,
};
let request = tonic::Request::new(req);
let mut request = tonic::Request::new(req);
request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
log::info!(
"[trace_id {trace_id}] get_cached_results->grpc: request node: {}",

View File

@@ -432,6 +432,7 @@ pub async fn query_status() -> Result<search::QueryStatusResponse, Error> {
async move {
let cfg = get_config();
let mut request = tonic::Request::new(proto::cluster_rpc::QueryStatusRequest {});
request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
opentelemetry::global::get_text_map_propagator(|propagator| {
propagator.inject_context(
@@ -591,6 +592,7 @@ pub async fn cancel_query(
let cfg = get_config();
let mut request =
tonic::Request::new(proto::cluster_rpc::CancelQueryRequest { trace_id });
request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
opentelemetry::global::get_text_map_propagator(|propagator| {
propagator.inject_context(
&tracing::Span::current().context(),

View File

@@ -0,0 +1,513 @@
<template>
<div v-for="(picker, index) in dateTimePickers" :key="index" class="q-mb-md">
<q-btn
style="width: 180px"
data-test="date-time-btn"
:label="getDisplayValue(picker)"
icon="schedule"
icon-right="arrow_drop_down"
class="date-time-button"
outline
no-caps
@click="picker.showMenu = !picker.showMenu"
/>
<q-menu
v-if="picker.showMenu"
class="date-time-dialog"
anchor="bottom left"
self="top left"
no-route-dismiss
@before-show="onBeforeShow"
@before-hide="onBeforeHide"
>
<q-tab-panels
class="tw-flex tw-justify-between"
v-model="picker.activeTab"
>
<q-tab-panel name="relative" class="q-pa-none">
<div class="date-time-table relative column">
<div
class="relative-row q-px-md q-py-sm"
v-for="(period, periodIndex) in relativePeriods"
:key="'date_' + periodIndex"
>
<div class="relative-period-name">
{{ period.label }}
</div>
<div
v-for="(item, itemIndex) in relativeDates[period.value]"
:key="item"
>
<q-btn
:data-test="`date-time-relative-${item}-${period.value}-btn`"
:label="item"
:class="
picker.data.selectedDate.relative.value == item &&
picker.data.selectedDate.relative.period == period.value
? 'rp-selector-selected'
: `rp-selector ${picker.relativePeriod}`
"
outline
dense
flat
@click="setRelativeDate(period, item, picker)"
/>
</div>
</div>
<div class="relative-row q-px-md q-py-sm">
<div class="relative-period-name">Custom</div>
<div class="row q-gutter-sm">
<div class="col">
<q-input
v-model="picker.data.selectedDate.relative.value"
type="number"
dense
filled
min="1"
@update:model-value="onCustomPeriodSelect(picker)"
/>
</div>
<div class="col">
<q-select
v-model="picker.data.selectedDate.relative.period"
:options="relativePeriodsSelect"
dense
filled
emit-value
@update:modelValue="onCustomPeriodSelect(picker)"
style="width: 100px"
>
<template v-slot:selected-item>
<div>{{ getPeriodLabel(picker) }}</div>
</template>
</q-select>
</div>
</div>
</div>
</div>
</q-tab-panel>
</q-tab-panels>
</q-menu>
<q-btn
v-if="props.deleteIcon == 'outlinedDelete'"
data-test="custom-date-picker-delete-btn"
:icon="outlinedDelete"
class="q-mb-sm q-ml-xs q-mr-sm"
:class="store.state?.theme === 'dark' ? 'icon-dark' : ''"
padding="xs"
unelevated
size="sm"
round
flat
@click="removeDateTimePicker(index)"
style="min-width: auto"
/>
<q-icon
v-else
class="q-mr-xs q-ml-sm"
size="15px"
name="close"
style="cursor: pointer"
@click="removeDateTimePicker(index)"
:data-test="`dashboard-addpanel-config-markline-remove-${index}`"
/>
</div>
<q-btn
@click="addDateTimePicker"
:class="!props.alertsPage ? 'dashboard-add-btn' : 'alert-add-btn'"
label="+ Add"
no-caps
data-test="date-time-picker-add-btn"
/>
</template>
<script setup>
import { ref, reactive, computed, watch, onMounted, onBeforeMount } from "vue";
import { useStore } from "vuex";
import {
outlinedDelete,
outlinedInfo,
} from "@quasar/extras/material-icons-outlined";
const store = useStore();
const dateTimePickers = ref([createPicker()]);
const relativePeriod = ref("m");
const relativeValue = ref(15);
const selectedType = ref("relative");
const props = defineProps({
deleteIcon: {
type: String,
default: "",
},
alertsPage: {
type: Boolean,
default: false,
},
});
function createPicker() {
return reactive({
activeTab: "relative",
data: {
selectedDate: {
relative: {
value: 15,
period: "m",
label: "Minutes",
},
},
},
});
}
let relativePeriods = [
{ label: "Minutes", value: "m" },
{ label: "Hours", value: "h" },
{ label: "Days", value: "d" },
{ label: "Weeks", value: "w" },
{ label: "Months", value: "M" },
];
let relativePeriodsSelect = ref([
{ label: "Minutes", value: "m" },
{ label: "Hours", value: "h" },
{ label: "Days", value: "d" },
{ label: "Weeks", value: "w" },
{ label: "Months", value: "M" },
]);
const relativeDates = {
m: [1, 5, 10, 15, 30, 45],
h: [1, 2, 3, 6, 8, 12],
d: [1, 2, 3, 4, 5, 6],
w: [1, 2, 3, 4, 5, 6],
M: [1, 2, 3, 4, 5, 6],
};
const relativeDatesInHour = {
m: [1, 1, 1, 1, 1, 1],
h: [1, 2, 3, 6, 8, 12],
d: [24, 48, 72, 96, 120, 144],
w: [168, 336, 504, 672, 840, 1008],
M: [744, 1488, 2232, 2976, 3720, 4464],
};
let relativePeriodsMaxValue = ref({
m: 0,
h: 0,
d: 0,
w: 0,
M: 0,
});
const emit = defineEmits(["update:dateTime"]);
const setRelativeDate = (period, item, picker) => {
const { label, value } = period;
picker.data.selectedDate.relative.period = value;
picker.data.selectedDate.relative.value = item;
picker.data.selectedDate.relative.label = label;
};
const onCustomPeriodSelect = (picker) => {
const { value, period } = picker.data.selectedDate.relative;
if (value == 0) {
}
// const { value, period } = picker.data.selectedDate.relative;
// picker.data.selectedDate.relative.label = period;
};
const dateTimeArray = computed(() => {
return dateTimePickers.value.map((picker) => {
const { value, period } = picker.data.selectedDate.relative;
return { offSet: value && period ? `${value}${period}` : null };
});
});
const onBeforeShow = () => {
// if (props.modelValue) selectedDate.value = cloneDeep(props.modelValue);
};
const onBeforeHide = () => {
if (selectedType.value === "absolute")
resetTime(selectedTime.value.startTime, selectedTime.value.endTime);
};
const getDisplayValue = (picker) => {
return `${picker.data.selectedDate.relative.value} ${picker.data.selectedDate.relative.label} ago`;
};
function removeDateTimePicker(index) {
dateTimePickers.value.splice(index, 1);
emit("update:dateTime", dateTimeArray.value);
}
const getPeriodLabel = (picker) => {
const periodMapping = {
m: "Minutes",
h: "Hours",
d: "Days",
w: "Weeks",
M: "Months",
};
picker.data.selectedDate.relative.label =
periodMapping[picker.data.selectedDate.relative.period];
return periodMapping[picker.data.selectedDate.relative.period];
};
function addDateTimePicker() {
dateTimePickers.value.push(createPicker());
emit("update:dateTime", dateTimeArray.value);
}
watch(
dateTimeArray,
(newVal) => {
emit("update:dateTime", newVal);
},
{ deep: true },
);
onBeforeMount(() => {
emit("update:dateTime", dateTimeArray.value);
});
</script>
<style scoped>
.relative-row {
/* Add your styles here */
}
.date-time-table {
/* Add your styles here */
}
.alerts-condition-action {
.q-btn {
&.icon-dark {
filter: none !important;
}
}
}
.alert-page-font {
background-color: red;
font-size: 14px;
}
</style>
<style lang="scss" scoped>
.q-btn--rectangle {
border-radius: 3px;
}
.date-time-button {
height: 100%;
border-radius: 3px;
padding: 0px 5px;
font-size: 12px;
min-width: auto;
background: rgba(89, 96, 178, 0.2) !important;
.q-icon.on-right {
transition: transform 0.25s ease;
}
&.isOpen .q-icon.on-right {
transform: rotate(180deg);
}
.q-btn__content {
justify-content: flex-start;
.block {
font-weight: 600;
}
}
}
.date-time-dialog {
width: 341px;
z-index: 10001;
max-height: 600px;
.tab-button {
&.q-btn {
padding-bottom: 0.1rem;
padding-top: 0.1rem;
font-size: 0.75rem;
font-weight: 700;
&.text-primary {
.q-btn__content {
}
}
}
}
}
.date-time-table.relative {
display: flex;
.relative-row {
display: flex;
flex: 1;
align-items: center;
border-bottom: 1px solid $border-color;
.block {
font-weight: 700;
}
.q-field {
&__control {
height: 40px;
}
&__native {
font-size: 0.875rem;
font-weight: 600;
}
.q-select__dropdown-icon {
}
}
> * {
margin-right: 6px;
}
}
}
.absolute-calendar {
box-shadow: none;
.q-date__header {
display: none;
}
.q-date__view {
padding: 0;
}
}
.relative-period-name {
font-size: 0.875rem;
font-weight: 600;
min-width: 75px;
}
.rp-selector,
.rp-selector-selected {
height: 32px;
width: 32px;
// border: $secondary;
background: rgba(0, 0, 0, 0.07);
}
.rp-selector-selected {
color: #ffffff;
background: $primary;
}
.tab-button {
width: 154px;
}
.notePara {
padding-right: 1.5rem;
padding-left: 1.5rem;
font-size: 0.625rem;
}
.q-date {
&__navigation {
justify-content: center;
padding: 0 0.5rem;
.q-date__arrow {
& + .q-date__arrow {
margin-left: auto;
}
& + .col {
flex: initial;
}
}
.q-btn .block {
font-size: 0.75rem;
font-weight: 700;
}
}
&__calendar {
&-item .block {
font-weight: 700;
}
&-weekdays > div {
font-size: 0.875rem;
font-weight: 700;
opacity: 1;
}
}
&__range {
&,
&-from,
&-to {
.block {
color: white;
}
&:before {
bottom: 3px;
top: 3px;
}
}
.block {
color: $dark-page;
}
}
}
.startEndTime {
.q-field {
padding-bottom: 0.125rem;
}
.label {
font-size: 0.75rem;
// color: $dark-page;
font-weight: 600;
}
.timeInput {
.q-field__control {
padding-right: 0.375rem;
}
.q-btn-group {
& > .q-btn-item {
border-radius: 2px;
}
.q-btn {
padding: 0 0.3125rem;
.block {
font-size: 0.625rem;
font-weight: 700;
}
}
}
}
}
.drawer-footer {
.q-btn {
font-size: 0.75rem;
font-weight: 700;
&.clearBtn {
margin-right: 1rem;
color: $dark-page;
}
}
}
.timezone-select {
.q-item:nth-child(2) {
border-bottom: 1px solid #dcdcdc;
}
}
.dashboard-add-btn {
cursor: pointer;
padding: 0px 5px;
}
.alert-add-btn {
border-radius: 4px;
text-transform: capitalize;
background: #f2f2f2 !important;
color: #000 !important;
}
</style>

View File

@@ -920,6 +920,43 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
"
/>
</div>
<div>
<div class="flex items-center q-mr-sm">
<div
data-test="scheduled-dashboard-period-title"
class="text-bold q-py-md flex items-center"
style="width: 190px"
>
Comparison Against
<q-btn
no-caps
padding="xs"
class=""
size="sm"
flat
icon="info_outline"
data-test="dashboard-addpanel-config-time-shift-info"
>
<q-tooltip
anchor="bottom middle"
self="top middle"
style="font-size: 10px"
max-width="250px"
>
<span
>This feature allows you to compare data points from multiple
queries over a selected time range. By adjusting the date or
time, the system will retrieve corresponding data from different
queries, enabling you to observe changes or differences between
the selected time periods
</span>
</q-tooltip>
</q-btn>
</div>
</div>
<CustomDateTimePicker @update:dateTime="handleDateTimeUpdate" />
</div>
</div>
</template>
@@ -930,9 +967,15 @@ import { useI18n } from "vue-i18n";
import Drilldown from "./Drilldown.vue";
import MarkLineConfig from "./MarkLineConfig.vue";
import CommonAutoComplete from "@/components/dashboards/addPanel/CommonAutoComplete.vue";
import CustomDateTimePicker from "@/components/CustomDateTimePicker.vue";
export default defineComponent({
components: { Drilldown, CommonAutoComplete, MarkLineConfig },
components: {
Drilldown,
CommonAutoComplete,
MarkLineConfig,
CustomDateTimePicker,
},
props: ["dashboardPanelData", "variablesData"],
setup(props) {
const dashboardPanelDataPageKey = inject(
@@ -1165,6 +1208,13 @@ export default defineComponent({
}
};
const handleDateTimeUpdate = (data: any) => {
//here we get the data from the CustomDateTimePicker component
dashboardPanelData.data.queries[
dashboardPanelData.layout.currentQueryIndex
].config.time_shift = data;
};
const selectPromQlNameOption = (option: any) => {
const inputValue =
dashboardPanelData.data.queries[
@@ -1213,6 +1263,7 @@ export default defineComponent({
legendWidthValue,
dashboardSelectfieldPromQlList,
selectPromQlNameOption,
handleDateTimeUpdate,
};
},
});

View File

@@ -71,7 +71,12 @@ export const usePanelDataLoader = (
*/
const getCacheKey = () => ({
panelSchema: toRaw(panelSchema.value),
variablesData: JSON.parse(JSON.stringify([...(getDependentVariablesData() || []), ...(getDynamicVariablesData() || [])])),
variablesData: JSON.parse(
JSON.stringify([
...(getDependentVariablesData() || []),
...(getDynamicVariablesData() || []),
]),
),
forceLoad: toRaw(forceLoad.value),
// searchType: toRaw(searchType.value),
dashboardId: toRaw(dashboardId?.value),
@@ -417,255 +422,375 @@ export const usePanelDataLoader = (
// which is used to check whether the current query has been aborted
const abortControllerRef = abortController;
// reset old state data
state.data = [];
state.metadata = {
queries: [],
};
state.resultMetaData = [];
// Call search API
// Get the page type from the first query in the panel schema
const pageType = panelSchema.value.queries[0]?.fields?.stream_type;
// Handle each query sequentially
for (const [
panelQueryIndex,
it,
] of panelSchema.value.queries.entries()) {
const { query: query1, metadata: metadata1 } = replaceQueryValue(
it.query,
startISOTimestamp,
endISOTimestamp,
panelSchema.value.queryType,
);
const { query: query2, metadata: metadata2 } =
await applyDynamicVariables(query1, panelSchema.value.queryType);
const query = query2;
const metadata: any = {
originalQuery: it.query,
query: query,
startTime: startISOTimestamp,
endTime: endISOTimestamp,
queryType: panelSchema.value.queryType,
variables: [...(metadata1 || []), ...(metadata2 || [])],
try {
// reset old state data
state.data = [];
state.metadata = {
queries: [],
};
const { traceparent, traceId } = generateTraceContext();
addTraceId(traceId);
try {
// partition api call
const res = await callWithAbortController(
async () =>
queryService.partition({
org_identifier: store.state.selectedOrganization.identifier,
query: {
sql: query,
query_fn: it.vrlFunctionQuery
? b64EncodeUnicode(it.vrlFunctionQuery)
: null,
sql_mode: "full",
start_time: startISOTimestamp,
end_time: endISOTimestamp,
size: -1,
state.resultMetaData = [];
// Call search API
// Get the page type from the first query in the panel schema
const pageType = panelSchema.value.queries[0]?.fields?.stream_type;
panelSchema.value.queries = [
{
query:
'SELECT histogram(_timestamp) as "x_axis_1", count(_timestamp) as "y_axis_1" FROM "default" GROUP BY x_axis_1 ORDER BY x_axis_1 ASC',
vrlFunctionQuery: "",
customQuery: false,
fields: {
stream: "default",
stream_type: "logs",
x: [
{
label: "Timestamp",
alias: "x_axis_1",
column: "_timestamp",
color: null,
aggregationFunction: "histogram",
sortBy: "ASC",
isDerived: false,
},
page_type: pageType,
traceparent,
}),
abortControllerRef.signal,
],
y: [
{
label: "Timestamp",
alias: "y_axis_1",
column: "_timestamp",
color: "#5960b2",
aggregationFunction: "count",
isDerived: false,
},
],
z: [],
breakdown: [],
filter: {
filterType: "group",
logicalOperator: "AND",
conditions: [],
},
},
config: {
promql_legend: "",
layer_type: "scatter",
weight_fixed: 1,
limit: 0,
min: 0,
max: 100,
},
},
{
query:
'SELECT histogram(_timestamp) as "x_axis_1", count(_timestamp) as "y_axis_1" FROM "default" GROUP BY x_axis_1 ORDER BY x_axis_1 ASC',
vrlFunctionQuery: "",
customQuery: false,
fields: {
stream: "default",
stream_type: "logs",
x: [
{
label: "Timestamp",
alias: "x_axis_1",
column: "_timestamp",
color: null,
aggregationFunction: "histogram",
sortBy: "ASC",
isDerived: false,
},
],
y: [
{
label: "Timestamp",
alias: "y_axis_1",
column: "_timestamp",
color: "#5960b2",
aggregationFunction: "count",
isDerived: false,
},
],
z: [],
breakdown: [],
filter: {
filterType: "group",
logicalOperator: "AND",
conditions: [],
},
},
config: {
promql_legend: "",
layer_type: "scatter",
weight_fixed: 1,
limit: 0,
min: 0,
max: 100,
},
},
];
// Handle each query sequentially
for (const [
panelQueryIndex,
it,
] of panelSchema.value.queries.entries()) {
state.loading = true;
const { query: query1, metadata: metadata1 } = replaceQueryValue(
it.query,
startISOTimestamp,
endISOTimestamp,
panelSchema.value.queryType,
);
// if aborted, return
if (abortControllerRef?.signal?.aborted) {
return;
}
const { query: query2, metadata: metadata2 } =
await applyDynamicVariables(query1, panelSchema.value.queryType);
// partition array from api response
const partitionArr = res?.data?.partitions ?? [];
const query = query2;
// always sort partitions in descending order
partitionArr.sort((a: any, b: any) => a[0] - b[0]);
// max_query_range for current query stream
const max_query_range = res?.data?.max_query_range ?? 0;
// histogram_interval from partition api response
const histogramInterval = res?.data?.histogram_interval
? `${res?.data?.histogram_interval} seconds`
: null;
// Add empty objects to state.metadata.queries and state.resultMetaData for the results of this query
state.data.push([]);
state.metadata.queries.push({});
state.resultMetaData.push({});
const currentQueryIndex = state.data.length - 1;
// Update the metadata for the current query
Object.assign(state.metadata.queries[currentQueryIndex], metadata);
// remaining query range
let remainingQueryRange = max_query_range;
// loop on all partitions and call search api for each partition
for (let i = partitionArr.length - 1; i >= 0; i--) {
state.loading = true;
const partition = partitionArr[i];
const metadata: any = {
originalQuery: it.query,
query: query,
startTime:
panelQueryIndex != 0
? startISOTimestamp - 60 * 60 * 1000000
: startISOTimestamp,
endTime:
panelQueryIndex != 0
? endISOTimestamp - 60 * 60 * 1000000
: endISOTimestamp,
queryType: panelSchema.value.queryType,
variables: [...(metadata1 || []), ...(metadata2 || [])],
timeRangeGap: panelQueryIndex != 0 ? 60 * 60 * 1000 : 0,
};
const { traceparent, traceId } = generateTraceContext();
addTraceId(traceId);
try {
// partition api call
const res = await callWithAbortController(
async () =>
queryService.partition({
org_identifier: store.state.selectedOrganization.identifier,
query: {
sql: query,
query_fn: it.vrlFunctionQuery
? b64EncodeUnicode(it.vrlFunctionQuery)
: null,
sql_mode: "full",
start_time: startISOTimestamp,
end_time: endISOTimestamp,
size: -1,
},
page_type: pageType,
traceparent,
}),
abortControllerRef.signal,
);
// if aborted, return
if (abortControllerRef?.signal?.aborted) {
break;
return;
}
const { traceparent, traceId } = generateTraceContext();
addTraceId(traceId);
try {
const searchRes = await callWithAbortController(
async () =>
queryService.search(
{
org_identifier:
store.state.selectedOrganization.identifier,
query: {
query: {
sql: await changeHistogramInterval(
query,
histogramInterval,
),
query_fn: it.vrlFunctionQuery
? b64EncodeUnicode(it.vrlFunctionQuery)
: null,
sql_mode: "full",
start_time: partition[0],
end_time: partition[1],
size: -1,
},
},
page_type: pageType,
traceparent,
},
searchType.value ?? "Dashboards",
),
abortControllerRef.signal,
);
// remove past error detail
state.errorDetail = "";
// partition array from api response
const partitionArr = res?.data?.partitions ?? [];
// if there is an function error and which not related to stream range, throw error
if (
searchRes.data.function_error &&
searchRes.data.is_partial != true
) {
// abort on unmount
if (abortControllerRef) {
// this will stop partition api call
abortControllerRef?.abort();
}
// always sort partitions in descending order
partitionArr.sort((a: any, b: any) => a[0] - b[0]);
// throw error
throw new Error(
`Function error: ${searchRes.data.function_error}`,
);
}
// max_query_range for current query stream
const max_query_range = res?.data?.max_query_range ?? 0;
// histogram_interval from partition api response
const histogramInterval = res?.data?.histogram_interval
? `${res?.data?.histogram_interval} seconds`
: null;
// Add empty objects to state.metadata.queries and state.resultMetaData for the results of this query
state.data.push([]);
state.metadata.queries.push({});
state.resultMetaData.push({});
const currentQueryIndex = state.data.length - 1;
// Update the metadata for the current query
Object.assign(
state.metadata.queries[currentQueryIndex],
metadata,
);
// remaining query range
let remainingQueryRange = max_query_range;
// loop on all partitions and call search api for each partition
for (let i = partitionArr.length - 1; i >= 0; i--) {
state.loading = true;
const partition = partitionArr[i];
// if the query is aborted or the response is partial, break the loop
if (abortControllerRef?.signal?.aborted) {
break;
}
const { traceparent, traceId } = generateTraceContext();
addTraceId(traceId);
state.data[currentQueryIndex] = [
...searchRes.data.hits,
...(state.data[currentQueryIndex] ?? []),
];
try {
const searchRes = await callWithAbortController(
async () =>
await queryService.search(
{
org_identifier:
store.state.selectedOrganization.identifier,
query: {
query: {
sql: await changeHistogramInterval(
query,
histogramInterval,
),
query_fn: it.vrlFunctionQuery
? b64EncodeUnicode(it.vrlFunctionQuery)
: null,
sql_mode: "full",
// if i == 0 ? then do gap of 7 days
start_time:
currentQueryIndex != 0
? partition[0] - 60 * 60 * 1000000
: partition[0],
end_time:
currentQueryIndex != 0
? partition[1] - 60 * 60 * 1000000
: partition[1],
size: -1,
},
},
page_type: pageType,
traceparent,
},
searchType.value ?? "Dashboards",
),
abortControllerRef.signal,
);
// remove past error detail
state.errorDetail = "";
// update result metadata
state.resultMetaData[currentQueryIndex] = searchRes.data ?? {};
// if there is an function error and which not related to stream range, throw error
if (
searchRes.data.function_error &&
searchRes.data.is_partial != true
) {
// abort on unmount
if (abortControllerRef) {
// this will stop partition api call
abortControllerRef?.abort();
}
if (searchRes.data.is_partial == true) {
// set the new start time as the start time of query
state.resultMetaData[currentQueryIndex].new_end_time =
endISOTimestamp;
// throw error
throw new Error(
`Function error: ${searchRes.data.function_error}`,
);
}
// need to break the loop, save the cache
saveCurrentStateToCache();
// if the query is aborted or the response is partial, break the loop
if (abortControllerRef?.signal?.aborted) {
break;
}
break;
}
state.data[currentQueryIndex] = [
...searchRes.data.hits,
...(state.data[currentQueryIndex] ?? []),
];
if (max_query_range != 0) {
// calculate the current partition time range
// convert timerange from milliseconds to hours
const timeRange = (partition[1] - partition[0]) / 3600000000;
// update result metadata
state.resultMetaData[currentQueryIndex] =
searchRes.data ?? {};
// get result cache ratio(it will be from 0 to 100)
const resultCacheRatio =
searchRes.data.result_cache_ratio ?? 0;
if (searchRes.data.is_partial == true) {
// set the new start time as the start time of query
state.resultMetaData[currentQueryIndex].new_end_time =
endISOTimestamp;
// calculate the remaining query range
// remaining query range = remaining query range - queried time range for the current partition
// queried time range = time range * ((100 - result cache ratio) / 100)
// need to break the loop, save the cache
saveCurrentStateToCache();
const queriedTimeRange =
timeRange * ((100 - resultCacheRatio) / 100);
break;
}
remainingQueryRange = remainingQueryRange - queriedTimeRange;
if (max_query_range != 0) {
// calculate the current partition time range
// convert timerange from milliseconds to hours
const timeRange =
(partition[1] - partition[0]) / 3600000000;
// if the remaining query range is less than 0, break the loop
// we exceeded the max query range
if (remainingQueryRange < 0) {
// set that is_partial to true if it is not last partition which we need to call
if (i != 0) {
// set that is_partial to true
state.resultMetaData[currentQueryIndex].is_partial = true;
// set function error
state.resultMetaData[currentQueryIndex].function_error =
`Query duration is modified due to query range restriction of ${max_query_range} hours`;
// set the new start time and end time
state.resultMetaData[currentQueryIndex].new_end_time =
endISOTimestamp;
// get result cache ratio(it will be from 0 to 100)
const resultCacheRatio =
searchRes.data.result_cache_ratio ?? 0;
// set the new start time as the start time of query
state.resultMetaData[currentQueryIndex].new_start_time =
partition[0];
// calculate the remaining query range
// remaining query range = remaining query range - queried time range for the current partition
// queried time range = time range * ((100 - result cache ratio) / 100)
// need to break the loop, save the cache
saveCurrentStateToCache();
const queriedTimeRange =
timeRange * ((100 - resultCacheRatio) / 100);
break;
remainingQueryRange =
remainingQueryRange - queriedTimeRange;
// if the remaining query range is less than 0, break the loop
// we exceeded the max query range
if (remainingQueryRange < 0) {
// set that is_partial to true if it is not last partition which we need to call
if (i != 0) {
// set that is_partial to true
state.resultMetaData[currentQueryIndex].is_partial =
true;
// set function error
state.resultMetaData[currentQueryIndex].function_error =
`Query duration is modified due to query range restriction of ${max_query_range} hours`;
// set the new start time and end time
state.resultMetaData[currentQueryIndex].new_end_time =
endISOTimestamp;
// set the new start time as the start time of query
state.resultMetaData[currentQueryIndex].new_start_time =
partition[0];
// need to break the loop, save the cache
saveCurrentStateToCache();
break;
}
}
}
} finally {
removeTraceId(traceId);
}
} finally {
removeTraceId(traceId);
}
if (i == 0) {
// if it is last partition, cache the result
saveCurrentStateToCache();
if (i == 0) {
// if it is last partition, cache the result
saveCurrentStateToCache();
}
}
}
} catch (error) {
// Process API error for "sql"
processApiError(error, "sql");
return { result: null, metadata: metadata };
} finally {
// set loading to false
state.loading = false;
removeTraceId(traceId);
// abort on done
if (abortControllerRef) {
abortControllerRef?.abort();
} catch (error) {
// Process API error for "sql"
processApiError(error, "sql");
return { result: null, metadata: metadata };
} finally {
// set loading to false
state.loading = false;
removeTraceId(traceId);
}
}
state.loading = false;
log("logaData: state.data", state.data);
log("logaData: state.metadata", state.metadata);
} finally {
// abort on done
if (abortControllerRef) {
abortControllerRef?.abort();
}
}
state.loading = false;
log("logaData: state.data", state.data);
log("logaData: state.metadata", state.metadata);
}
} catch (error: any) {
if (
@@ -1356,9 +1481,15 @@ export const usePanelDataLoader = (
"panelSchema.markdownContent",
];
log("usePanelDataLoader: panelcache: tempPanelCacheKey", tempPanelCacheKey)
log("usePanelDataLoader: panelcache: omit(getCacheKey())", omit(getCacheKey(), keysToIgnore))
log("usePanelDataLoader: panelcache: omit(tempPanelCacheKey))", omit(tempPanelCacheKey, keysToIgnore))
log("usePanelDataLoader: panelcache: tempPanelCacheKey", tempPanelCacheKey);
log(
"usePanelDataLoader: panelcache: omit(getCacheKey())",
omit(getCacheKey(), keysToIgnore),
);
log(
"usePanelDataLoader: panelcache: omit(tempPanelCacheKey))",
omit(tempPanelCacheKey, keysToIgnore),
);
// check if it is stale or not
if (

View File

@@ -115,6 +115,7 @@ const getDefaultDashboardPanelData: any = () => ({
// gauge min and max values
min: 0,
max: 100,
time_shift: [],
},
},
],

View File

@@ -14,7 +14,10 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
import { convertPromQLData } from "@/utils/dashboard/convertPromQLData";
import { convertSQLData } from "@/utils/dashboard/convertSQLData";
import {
convertMultiSQLData,
convertSQLData,
} from "@/utils/dashboard/convertSQLData";
import { convertTableData } from "@/utils/dashboard/convertTableData";
import { convertMapData } from "@/utils/dashboard/convertMapData";
import { convertSankeyData } from "./convertSankeyData";
@@ -68,9 +71,20 @@ export const convertPanelData = async (
};
} else {
// chartpanelref will be used to get width and height of the chart element from DOM
// await convertMultiSQLData(
// panelSchema,
// data,
// store,
// chartPanelRef,
// hoveredSeriesState,
// resultMetaData,
// metadata,
// );
return {
chartType: panelSchema.type,
...(await convertSQLData(
...(await convertMultiSQLData(
panelSchema,
data,
store,

View File

@@ -34,6 +34,53 @@ import {
import { calculateGridPositions } from "./calculateGridForSubPlot";
import { isGivenFieldInOrderBy } from "../query/sqlUtils";
export const convertMultiSQLData = async (
panelSchema: any,
searchQueryData: any,
store: any,
chartPanelRef: any,
hoveredSeriesState: any,
resultMetaData: any,
metadata: any,
) => {
if (!Array.isArray(searchQueryData) || searchQueryData.length === 0) {
return { options: null };
}
// loop on all search query data
const options: any = [];
for (let i = 0; i < searchQueryData.length; i++) {
options.push(
await convertSQLData(
panelSchema,
[searchQueryData[i]],
store,
chartPanelRef,
hoveredSeriesState,
[resultMetaData.value[i]],
{ queries: [metadata.queries[i]] },
),
);
}
// loop on all options
if (options && options[0] && options[0].options) {
for (let i = 1; i < options.length; i++) {
if (options[i] && options[i].options && options[i].options.series) {
options[0].options.series = [
...options[0].options.series,
...options[i].options.series.map((it: any) => {
return { ...it, name: it.name + " (15min ago)" };
}),
];
}
}
}
console.log("options", JSON.parse(JSON.stringify(options)));
return options[0];
};
export const convertSQLData = async (
panelSchema: any,
searchQueryData: any,
@@ -221,9 +268,7 @@ export const convertSQLData = async (
const missingValue = () => {
// Get the interval in minutes
const interval = resultMetaData?.value?.map(
(it: any) => it.histogram_interval,
)[0];
const interval = resultMetaData?.map((it: any) => it.histogram_interval)[0];
if (
!interval ||
@@ -1594,8 +1639,10 @@ export const convertSQLData = async (
if (timeStringCache[xKey]) {
x = timeStringCache[xKey];
} else {
// need to consider time range gap
x = toZonedTime(
new Date(options.xAxis[0].data[index] + "Z").getTime(),
new Date(options.xAxis[0].data[index] + "Z").getTime() +
metadata?.queries[0]?.timeRangeGap,
store.state.timezone,
);
timeStringCache[xKey] = x;
@@ -1609,8 +1656,11 @@ export const convertSQLData = async (
if (timeStringCache[xKey]) {
x = timeStringCache[xKey];
} else {
// need to consider time range gap
x = toZonedTime(
new Date(options.xAxis[0].data[index]).getTime() / 1000,
(new Date(options.xAxis[0].data[index]).getTime() +
metadata?.queries[0]?.timeRangeGap) /
1000,
store.state.timezone,
);
timeStringCache[xKey] = x;
@@ -1750,16 +1800,21 @@ export const convertSQLData = async (
// if value field is not present in the data than use null
if (isTimeSeriesData) {
seriesObj.data = seriesObj?.data?.map((it: any, index: any) => [
// need to consider time range gap
toZonedTime(
new Date(options.xAxis[0].data[index] + "Z").getTime(),
new Date(options.xAxis[0].data[index] + "Z").getTime() +
metadata?.queries[0]?.timeRangeGap,
store.state.timezone,
),
it ?? null,
]);
} else if (isTimeStampData) {
seriesObj.data = seriesObj?.data?.map((it: any, index: any) => [
// need to consider time range gap
toZonedTime(
new Date(options.xAxis[0].data[index]).getTime() / 1000,
(new Date(options.xAxis[0].data[index]).getTime() +
metadata?.queries[0]?.timeRangeGap) /
1000,
store.state.timezone,
),
it ?? null,
@@ -1972,7 +2027,7 @@ export const convertSQLData = async (
options.toolbox.show = options.toolbox.show && isTimeSeriesFlag;
return {
options,
options: JSON.parse(JSON.stringify(options)),
extras: { panelId: panelSchema?.id, isTimeSeries: isTimeSeriesFlag },
};
};

View File

@@ -137,12 +137,8 @@ export default defineComponent({
const getRowCount = computed(() => {
// 24 is the height of toolbar
// 28 is the height of table header
// 28.5 is the height of each row
// 33 is the height of pagination
const count = Number(
Math.ceil((updatedLayout.value.h * 30 - (28 + 24 + 33)) / 28.5),
);
const count = Number(Math.ceil((updatedLayout.value.h * 30 - 24) / 28.5));
if (count < 0) return 0;
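
A worked example of the change, assuming a panel whose layout height is 6 grid units at 30 px each: the old formula gave ceil((6 × 30 − (28 + 24 + 33)) / 28.5) = ceil(95 / 28.5) = 4 rows, while the new one gives ceil((6 × 30 − 24) / 28.5) = ceil(156 / 28.5) = 6 rows, since only the 24 px toolbar height is subtracted now.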