Compare commits
11 Commits
2443eb2038 ... 46726f4b74

Author | SHA1 | Date |
---|---|---|
TessaIO | 46726f4b74 | |
Ashish Kolhe | e20c509bf4 | |
Ashish Kolhe | 4fffa6d8a0 | |
Subhra264 | 34f4a3c7cc | |
Omkar Kesarkhane | 58dc29408e | |
Yashodhan Joshi | a71abee701 | |
John Vandenberg | 16ee5fc680 | |
TessaIO | f51a262bcf | |
TessaIO | bfee38a538 | |
TessaIO | 05fe3e802c | |
TessaIO | 2123bc8a32 | |
.typos.toml

@@ -11,11 +11,6 @@ extend-ignore-re = [
     "cohabitatingResource[12]",
     # The following are typos yet to be fixed
     "custom_error_handeling",
     "organizationIdetifier",
     "udpateQuery",
     "resetFunctionDefination",
     "machedRoutes",
     "seachCollapseImage",
 ]

 [default.extend-words]

@@ -29,7 +24,9 @@ extend-exclude = [
     "web/src/locales/languages/",
     "ua_regex/regexes.yaml",
     "tests/ui-testing/cypress/fixtures/enrichment_info.csv",
     "*.vue",
     "*.js",
     "*.ts",
     "web/src/components/rum/dd_events.js",
     "web/src/components/rum/h_events.js",
     "web/src/components/rum/segments.js",
     "web/src/components/rum/sessions.js",
     "web/src/test/unit/mockData",
 ]

@@ -16,7 +16,7 @@
 use std::str::FromStr;

 use proto::cluster_rpc;
-use serde::{Deserialize, Serialize};
+use serde::{Deserialize, Deserializer, Serialize};
 use utoipa::ToSchema;

 use crate::{

@@ -739,10 +739,24 @@ pub struct MultiSearchPartitionResponse {
     pub error: hashbrown::HashMap<String, String>,
 }

+#[derive(Clone, Debug, Serialize, Deserialize, ToSchema)]
+pub struct SqlQuery {
+    pub sql: String,
+    #[serde(default)]
+    pub start_time: Option<i64>,
+    #[serde(default)]
+    pub end_time: Option<i64>,
+    #[serde(default)]
+    pub query_fn: Option<String>,
+    #[serde(default)]
+    pub is_old_format: bool,
+}
+
 #[derive(Clone, Debug, Serialize, Deserialize, ToSchema)]
 #[schema(as = SearchRequest)]
 pub struct MultiStreamRequest {
-    pub sql: Vec<String>,
+    #[serde(default, deserialize_with = "deserialize_sql")]
+    pub sql: Vec<SqlQuery>, // Use the new struct for SQL queries
     #[serde(default)]
     pub encoding: RequestEncoding,
     #[serde(default)]

@@ -774,25 +788,63 @@ pub struct MultiStreamRequest {
     pub search_type: Option<SearchEventType>,
     #[serde(default)]
     pub index_type: String, // parquet(default) or fst
+    #[serde(default)]
+    pub per_query_response: bool,
 }

+fn deserialize_sql<'de, D>(deserializer: D) -> Result<Vec<SqlQuery>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    #[derive(Deserialize)]
+    #[serde(untagged)]
+    enum SqlOrSqlQuery {
+        OldFormat(String),
+        NewFormat(SqlQuery),
+    }
+
+    let v: Vec<SqlOrSqlQuery> = Vec::deserialize(deserializer)?;
+
+    // Convert old format into the new format
+    let result: Vec<SqlQuery> = v
+        .into_iter()
+        .map(|item| match item {
+            SqlOrSqlQuery::OldFormat(sql) => SqlQuery {
+                sql,
+                start_time: None,
+                end_time: None,
+                query_fn: None,
+                is_old_format: true,
+            },
+            SqlOrSqlQuery::NewFormat(query) => query,
+        })
+        .collect();
+
+    Ok(result)
+}
+
 impl MultiStreamRequest {
     pub fn to_query_req(&mut self) -> Vec<Request> {
         let mut res = vec![];
         for query in &self.sql {
+            let query_fn = if query.is_old_format {
+                self.query_fn.clone()
+            } else {
+                query.query_fn.clone()
+            };
             res.push(Request {
                 query: Query {
-                    sql: query.to_string(),
+                    sql: query.sql.clone(),
                    from: self.from,
                    size: self.size,
-                    start_time: self.start_time,
-                    end_time: self.end_time,
+                    start_time: query.start_time.unwrap_or(self.start_time),
+                    end_time: query.end_time.unwrap_or(self.end_time),
                    sort_by: self.sort_by.clone(),
                    quick_mode: self.quick_mode,
                    query_type: self.query_type.clone(),
                    track_total_hits: self.track_total_hits,
                    uses_zo_fn: self.uses_zo_fn,
-                    query_fn: self.query_fn.clone(),
+                    query_fn,
                    skip_wal: self.skip_wal,
                 },
                 regions: self.regions.clone(),

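Note: the backward compatibility above hinges on serde's untagged enum, which tries each variant in order, so a bare SQL string still deserializes while the new object form gains per-query overrides. A minimal self-contained sketch of the same pattern (names mirror the diff, but this snippet is illustrative, not the crate's actual module):

```rust
use serde::{Deserialize, Deserializer};

#[derive(Debug, Deserialize)]
struct SqlQuery {
    sql: String,
    #[serde(default)]
    start_time: Option<i64>,
    #[serde(default)]
    is_old_format: bool,
}

// Accept either "select ..." or {"sql": "...", "start_time": ...} per element.
fn deserialize_sql<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<SqlQuery>, D::Error> {
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum SqlOrSqlQuery {
        OldFormat(String),
        NewFormat(SqlQuery),
    }
    Ok(Vec::<SqlOrSqlQuery>::deserialize(d)?
        .into_iter()
        .map(|item| match item {
            // Old entries carry no per-query settings; flag them so the
            // caller falls back to the request-level values.
            SqlOrSqlQuery::OldFormat(sql) => SqlQuery {
                sql,
                start_time: None,
                is_old_format: true,
            },
            SqlOrSqlQuery::NewFormat(q) => q,
        })
        .collect())
}

#[derive(Debug, Deserialize)]
struct MultiStreamRequest {
    #[serde(default, deserialize_with = "deserialize_sql")]
    sql: Vec<SqlQuery>,
}

fn main() {
    // Old format (bare strings) and new format (objects) can be mixed.
    let body = r#"{"sql": ["select * from t1", {"sql": "select * from t2", "start_time": 1}]}"#;
    let req: MultiStreamRequest = serde_json::from_str(body).unwrap();
    assert!(req.sql[0].is_old_format && !req.sql[1].is_old_format);
}
```
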
@@ -88,7 +88,7 @@ pub fn check_auth(req: Request<()>) -> Result<Request<()>, Status> {

 #[cfg(test)]
 mod tests {
-    use config::cache_instance_id;
+    use config::{cache_instance_id, get_config};

     use super::*;
     use crate::common::meta::user::User;

@@ -114,6 +114,9 @@ mod tests {
     );

     let mut request = tonic::Request::new(());
+    request.set_timeout(std::time::Duration::from_secs(
+        get_config().limit.query_timeout,
+    ));

     let token: MetadataValue<_> = "basic cm9vdEBleGFtcGxlLmNvbTp0b2tlbg==".parse().unwrap();
     let meta: &mut tonic::metadata::MetadataMap = request.metadata_mut();

@@ -144,6 +147,9 @@ mod tests {
     );

     let mut request = tonic::Request::new(());
+    request.set_timeout(std::time::Duration::from_secs(
+        get_config().limit.query_timeout,
+    ));
     let token: MetadataValue<_> = "instance".parse().unwrap();
     let meta: &mut tonic::metadata::MetadataMap = request.metadata_mut();
     meta.insert("authorization", token.clone());

@@ -171,6 +177,9 @@ mod tests {
         },
     );
     let mut request = tonic::Request::new(());
+    request.set_timeout(std::time::Duration::from_secs(
+        get_config().limit.query_timeout,
+    ));

     let token: MetadataValue<_> = "basic cm9vdEBleGFtcGxlLmNvbTp0b2tlbjg4OA=="
         .parse()

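Note: the tests above now attach an explicit deadline to each request. As far as tonic's API goes, `Request::set_timeout` transmits the duration as `grpc-timeout` metadata rather than arming a local timer. A hedged sketch of building such a request (`timeout_secs` stands in for `get_config().limit.query_timeout`; the token is the same test fixture):

```rust
use std::time::Duration;
use tonic::{metadata::MetadataValue, Request};

fn build_authed_request(timeout_secs: u64) -> Request<()> {
    let mut request = Request::new(());
    // Sent to the server as `grpc-timeout` metadata, so the handler can
    // observe the caller's deadline.
    request.set_timeout(Duration::from_secs(timeout_secs));
    let token: MetadataValue<_> = "basic cm9vdEBleGFtcGxlLmNvbTp0b2tlbg==".parse().unwrap();
    request.metadata_mut().insert("authorization", token);
    request
}
```
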
@@ -49,13 +49,12 @@ use crate::{
     context_path = "/api",
     tag = "Search",
     operation_id = "SearchSQL",
     security(
         ("Authorization"= [])
     ),
-    params(
-        ("org_id" = String, Path, description = "Organization name"),
-    ),
-    request_body(content = SearchRequest, description = "Search query", content_type = "application/json", example = json!({
+    params(("org_id" = String, Path, description = "Organization name")),
+    request_body(
+        content = SearchRequest,
+        description = "Search query",
+        content_type = "application/json",
+        example = json!({
         "query": {
             "sql": "select * from k8s ",
             "start_time": 1675182660872049i64,

@@ -63,9 +62,15 @@ use crate::{
             "from": 0,
             "size": 10
         }
-    })),
+    })
+    ),
     responses(
-        (status = 200, description = "Success", content_type = "application/json", body = SearchResponse, example = json!({
+        (
+            status = 200,
+            description = "Success",
+            content_type = "application/json",
+            body = SearchResponse,
+            example = json!({
         "took": 155,
         "hits": [
             {

@@ -89,9 +94,20 @@ use crate::{
         "from": 0,
         "size": 1,
         "scan_size": 28943
-        })),
-        (status = 400, description = "Failure", content_type = "application/json", body = HttpResponse),
-        (status = 500, description = "Failure", content_type = "application/json", body = HttpResponse),
+        }),
+        ),
+        (
+            status = 400,
+            description = "Failure",
+            content_type = "application/json",
+            body = HttpResponse,
+        ),
+        (
+            status = 500,
+            description = "Failure",
+            content_type = "application/json",
+            body = HttpResponse,
+        )
     )
 )]
 #[post("/{org_id}/_search_multi")]

@@ -115,18 +131,24 @@ pub async fn search_multi(
     let query = web::Query::<HashMap<String, String>>::from_query(in_req.query_string()).unwrap();
     let stream_type = match get_stream_type_from_request(&query) {
         Ok(v) => v.unwrap_or(StreamType::Logs),
-        Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
+        Err(e) => {
+            return Ok(MetaHttpResponse::bad_request(e));
+        }
     };

     let search_type = match get_search_type_from_request(&query) {
         Ok(v) => v,
-        Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
+        Err(e) => {
+            return Ok(MetaHttpResponse::bad_request(e));
+        }
     };

     // handle encoding for query and aggs
     let mut multi_req: search::MultiStreamRequest = match json::from_slice(&body) {
         Ok(v) => v,
-        Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
+        Err(e) => {
+            return Ok(MetaHttpResponse::bad_request(e));
+        }
     };

     let mut query_fn = multi_req

@@ -144,6 +166,8 @@ pub async fn search_multi(
     let mut queries = multi_req.to_query_req();
     let mut multi_res = search::Response::new(multi_req.from, multi_req.size);

+    let per_query_resp = multi_req.per_query_response;
+
     // Before making any rpc requests, first check the sql expressions can be decoded correctly
     for req in queries.iter_mut() {
         if let Err(e) = req.decode() {

@@ -311,7 +335,7 @@ pub async fn search_multi(
                 multi_res.took += res.took;

                 if res.total > multi_res.total {
-                    multi_res.total = res.total
+                    multi_res.total = res.total;
                 }
                 multi_res.from = res.from;
                 multi_res.size += res.size;

@@ -319,10 +343,15 @@
                 multi_res.scan_size += res.scan_size;
                 multi_res.scan_records += res.scan_records;
                 multi_res.columns.extend(res.columns);
-                multi_res.hits.extend(res.hits);
                 multi_res.response_type = res.response_type;
                 multi_res.trace_id = res.trace_id;
                 multi_res.cached_ratio = res.cached_ratio;
+
+                if per_query_resp {
+                    multi_res.hits.push(serde_json::Value::Array(res.hits));
+                } else {
+                    multi_res.hits.extend(res.hits);
+                }
             }
             Err(err) => {
                 let time = start.elapsed().as_secs_f64();

@@ -379,19 +408,24 @@ pub async fn search_multi(
     context_path = "/api",
     tag = "Search",
     operation_id = "SearchPartitionMulti",
     security(
         ("Authorization"= [])
     ),
-    params(
-        ("org_id" = String, Path, description = "Organization name"),
-    ),
-    request_body(content = SearchRequest, description = "Search query", content_type = "application/json", example = json!({
+    params(("org_id" = String, Path, description = "Organization name")),
+    request_body(
+        content = SearchRequest,
+        description = "Search query",
+        content_type = "application/json",
+        example = json!({
         "sql": "select * from k8s ",
         "start_time": 1675182660872049i64,
         "end_time": 1675185660872049i64
-    })),
+    })
+    ),
     responses(
-        (status = 200, description = "Success", content_type = "application/json", body = SearchResponse, example = json!({
+        (
+            status = 200,
+            description = "Success",
+            content_type = "application/json",
+            body = SearchResponse,
+            example = json!({
         "took": 155,
         "file_num": 10,
         "original_size": 10240,

@@ -400,9 +434,20 @@ pub async fn search_multi(
             [1674213225158000i64, 1674213225158000i64],
             [1674213225158000i64, 1674213225158000i64],
         ]
-        })),
-        (status = 400, description = "Failure", content_type = "application/json", body = HttpResponse),
-        (status = 500, description = "Failure", content_type = "application/json", body = HttpResponse),
+        }),
+        ),
+        (
+            status = 400,
+            description = "Failure",
+            content_type = "application/json",
+            body = HttpResponse,
+        ),
+        (
+            status = 500,
+            description = "Failure",
+            content_type = "application/json",
+            body = HttpResponse,
+        )
     )
 )]
 #[post("/{org_id}/_search_partition_multi")]

@@ -428,12 +473,16 @@ pub async fn _search_partition_multi(
     let query = web::Query::<HashMap<String, String>>::from_query(in_req.query_string()).unwrap();
     let stream_type = match get_stream_type_from_request(&query) {
         Ok(v) => v.unwrap_or(StreamType::Logs),
-        Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
+        Err(e) => {
+            return Ok(MetaHttpResponse::bad_request(e));
+        }
     };

     let req: search::MultiSearchPartitionRequest = match json::from_slice(&body) {
         Ok(v) => v,
-        Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
+        Err(e) => {
+            return Ok(MetaHttpResponse::bad_request(e));
+        }
     };

     let search_fut = SearchService::search_partition_multi(&trace_id, &org_id, stream_type, &req);

@@ -570,12 +619,16 @@ pub async fn around_multi(
     let query = web::Query::<HashMap<String, String>>::from_query(in_req.query_string()).unwrap();
     let stream_type = match get_stream_type_from_request(&query) {
         Ok(v) => v.unwrap_or(StreamType::Logs),
-        Err(e) => return Ok(MetaHttpResponse::bad_request(e)),
+        Err(e) => {
+            return Ok(MetaHttpResponse::bad_request(e));
+        }
     };

     let around_key = match query.get("key") {
         Some(v) => v.parse::<i64>().unwrap_or(0),
-        None => return Ok(MetaHttpResponse::bad_request("around key is empty")),
+        None => {
+            return Ok(MetaHttpResponse::bad_request("around key is empty"));
+        }
     };
     let mut query_fn = query
         .get("query_fn")

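Note: the new `per_query_response` flag changes only how hits are merged across queries: flattened into one list by default, or pushed as one nested array per query so callers can attribute hits to the query that produced them. A small illustrative sketch of the two shapes:

```rust
use serde_json::{json, Value};

fn merge(hits_per_query: Vec<Vec<Value>>, per_query_resp: bool) -> Vec<Value> {
    let mut merged = Vec::new();
    for hits in hits_per_query {
        if per_query_resp {
            // one array element per query, preserving query boundaries
            merged.push(Value::Array(hits));
        } else {
            // default: flatten all hits into a single list
            merged.extend(hits);
        }
    }
    merged
}

fn main() {
    let per_query = merge(vec![vec![json!(1)], vec![json!(2), json!(3)]], true);
    assert_eq!(per_query, vec![json!([1]), json!([2, 3])]);
    let flat = merge(vec![vec![json!(1)], vec![json!(2), json!(3)]], false);
    assert_eq!(flat, vec![json!(1), json!(2), json!(3)]);
}
```
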
@@ -234,6 +234,12 @@ pub struct MetaRecord {
     pub value: String,
 }

+#[derive(Hash, Clone, Eq, PartialEq)]
+struct DBIndex {
+    name: String,
+    table: String,
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-use std::{str::FromStr, sync::Arc};
+use std::{collections::HashSet, str::FromStr, sync::Arc};

 use async_trait::async_trait;
 use bytes::Bytes;

@@ -24,11 +24,13 @@ use sqlx::{
     mysql::{MySqlConnectOptions, MySqlPoolOptions},
     ConnectOptions, MySql, Pool,
 };
-use tokio::sync::mpsc;
+use tokio::sync::{mpsc, OnceCell};

+use super::DBIndex;
 use crate::errors::*;

 pub static CLIENT: Lazy<Pool<MySql>> = Lazy::new(connect);
+static INDICES: OnceCell<HashSet<DBIndex>> = OnceCell::const_new();

 fn connect() -> Pool<MySql> {
     let cfg = config::get_config();

@@ -50,6 +52,22 @@ fn connect() -> Pool<MySql> {
         .connect_lazy_with(db_opts)
 }

+async fn cache_indices() -> HashSet<DBIndex> {
+    let client = CLIENT.clone();
+    let var_name = r#"SELECT INDEX_NAME,TABLE_NAME FROM information_schema.statistics;"#;
+    let sql = var_name;
+    let res = sqlx::query_as::<_, (String, String)>(sql)
+        .fetch_all(&client)
+        .await;
+    match res {
+        Ok(r) => r
+            .into_iter()
+            .map(|(name, table)| DBIndex { name, table })
+            .collect(),
+        Err(_) => HashSet::new(),
+    }
+}
+
 pub struct MysqlDb {}

 impl MysqlDb {

@@ -616,32 +634,23 @@ CREATE TABLE IF NOT EXISTS meta
     }

     // create table index
-    create_index_item("CREATE INDEX meta_module_idx on meta (module);").await?;
-    create_index_item("CREATE INDEX meta_module_key1_idx on meta (module, key1);").await?;
-    create_index_item(
-        "CREATE UNIQUE INDEX meta_module_start_dt_idx on meta (module, key1, key2, start_dt);",
+    create_index("meta_module_idx", "meta", false, &["module"]).await?;
+    create_index("meta_module_key1_idx", "meta", false, &["module", "key1"]).await?;
+    create_index(
+        "meta_module_start_dt_idx",
+        "meta",
+        true,
+        &["module", "key1", "key2", "start_dt"],
     )
     .await?;

     Ok(())
 }

-async fn create_index_item(sql: &str) -> Result<()> {
-    let pool = CLIENT.clone();
-    if let Err(e) = sqlx::query(sql).execute(&pool).await {
-        if e.to_string().contains("Duplicate key") {
-            // index already exists
-            return Ok(());
-        }
-        log::error!("[MYSQL] create table meta index error: {}", e);
-        return Err(e.into());
-    }
-    Ok(())
-}
-
 async fn add_start_dt_column() -> Result<()> {
     let pool = CLIENT.clone();
     let mut tx = pool.begin().await?;

     if let Err(e) =
         sqlx::query(r#"ALTER TABLE meta ADD COLUMN start_dt BIGINT NOT NULL DEFAULT 0;"#)
             .execute(&mut *tx)

@@ -662,42 +671,16 @@ async fn add_start_dt_column() -> Result<()> {
     };

     // create new index meta_module_start_dt_idx
-    if let Err(e) = create_index_item(
-        "CREATE UNIQUE INDEX meta_module_start_dt_idx ON meta (module, key1, key2, start_dt);",
+    create_index(
+        "meta_module_start_dt_idx",
+        "meta",
+        true,
+        &["module", "key1", "key2", "start_dt"],
     )
-    .await
-    {
-        log::error!(
-            "[MYSQL] Error in adding index meta_module_start_dt_idx: {}",
-            e
-        );
-        return Err(e);
-    }
-
+    .await?;
     // delete old index meta_module_key2_idx
-    let mut tx = pool.begin().await?;
-    if let Err(e) = sqlx::query(r#"DROP INDEX meta_module_key2_idx ON meta;"#)
-        .execute(&mut *tx)
-        .await
-    {
-        if !e.to_string().contains("check that column/key exists")
-            && !e.to_string().contains("check that it exists")
-        {
-            // Check for the specific MySQL error code for duplicate column
-            log::error!(
-                "[MYSQL] Error in dropping index meta_module_key2_idx: {}",
-                e
-            );
-            if let Err(e) = tx.rollback().await {
-                log::error!("[MYSQL] Error in rolling back transaction: {}", e);
-            }
-            return Err(e.into());
-        }
-    }
-    if let Err(e) = tx.commit().await {
-        log::info!("[MYSQL] Error in committing transaction: {}", e);
-        return Err(e.into());
-    };
+    delete_index("meta_module_key2_idx", "meta").await?;

     Ok(())
 }

@@ -746,3 +729,47 @@ async fn create_meta_backup() -> Result<()> {

     Ok(())
 }
+
+pub async fn create_index(
+    idx_name: &str,
+    table: &str,
+    unique: bool,
+    fields: &[&str],
+) -> Result<()> {
+    let client = CLIENT.clone();
+    let indices = INDICES.get_or_init(cache_indices).await;
+    if indices.contains(&DBIndex {
+        name: idx_name.into(),
+        table: table.into(),
+    }) {
+        return Ok(());
+    }
+    let unique_str = if unique { "UNIQUE" } else { "" };
+    log::info!("[MYSQL] creating index {} on table {}", idx_name, table);
+    let sql = format!(
+        "CREATE {} INDEX {} ON {} ({});",
+        unique_str,
+        idx_name,
+        table,
+        fields.join(",")
+    );
+    sqlx::query(&sql).execute(&client).await?;
+    log::info!("[MYSQL] index {} created successfully", idx_name);
+    Ok(())
+}
+
+pub async fn delete_index(idx_name: &str, table: &str) -> Result<()> {
+    let client = CLIENT.clone();
+    let indices = INDICES.get_or_init(cache_indices).await;
+    if !indices.contains(&DBIndex {
+        name: idx_name.into(),
+        table: table.into(),
+    }) {
+        return Ok(());
+    }
+    log::info!("[MYSQL] deleting index {} on table {}", idx_name, table);
+    let sql = format!("DROP INDEX {} ON {};", idx_name, table);
+    sqlx::query(&sql).execute(&client).await?;
+    log::info!("[MYSQL] index {} deleted successfully", idx_name);
+    Ok(())
+}

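Note: all three backends now consult a process-wide snapshot of existing indices before issuing DDL, built exactly once via `tokio::sync::OnceCell`. A minimal sketch of that get-or-init pattern, with the catalog query (information_schema.statistics, pg_indexes, or sqlite_master in the real code) simulated:

```rust
use std::collections::HashSet;
use tokio::sync::OnceCell;

static INDICES: OnceCell<HashSet<String>> = OnceCell::const_new();

// Stand-in for the one-time catalog lookup.
async fn cache_indices() -> HashSet<String> {
    let mut set = HashSet::new();
    set.insert("meta_module_idx".to_string());
    set
}

async fn create_index(idx_name: &str) {
    // The first caller runs cache_indices(); later callers reuse the result.
    let indices = INDICES.get_or_init(cache_indices).await;
    if indices.contains(idx_name) {
        return; // skip the DDL round-trip entirely
    }
    println!("CREATE INDEX {idx_name} ...");
}

#[tokio::main]
async fn main() {
    create_index("meta_module_idx").await; // cached: no-op
    create_index("meta_module_key1_idx").await; // not cached: issues DDL
}
```

One design consequence, visible in the diff as well: the snapshot is taken once per process, so indices created by another process after startup are not reflected until restart.
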
@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-use std::{str::FromStr, sync::Arc};
+use std::{collections::HashSet, str::FromStr, sync::Arc};

 use async_trait::async_trait;
 use bytes::Bytes;

@@ -24,11 +24,13 @@ use sqlx::{
     postgres::{PgConnectOptions, PgPoolOptions},
     ConnectOptions, Pool, Postgres,
 };
-use tokio::sync::mpsc;
+use tokio::sync::{mpsc, OnceCell};

+use super::DBIndex;
 use crate::errors::*;

 pub static CLIENT: Lazy<Pool<Postgres>> = Lazy::new(connect);
+static INDICES: OnceCell<HashSet<DBIndex>> = OnceCell::const_new();

 fn connect() -> Pool<Postgres> {
     let cfg = config::get_config();

@@ -50,6 +52,20 @@ fn connect() -> Pool<Postgres> {
         .connect_lazy_with(db_opts)
 }

+async fn cache_indices() -> HashSet<DBIndex> {
+    let client = CLIENT.clone();
+    let sql = r#"SELECT indexname, tablename FROM pg_indexes;"#;
+    let res = sqlx::query_as::<_, (String, String)>(sql)
+        .fetch_all(&client)
+        .await;
+    match res {
+        Ok(r) => r
+            .into_iter()
+            .map(|(name, table)| DBIndex { name, table })
+            .collect(),
+        Err(_) => HashSet::new(),
+    }
+}
+
 pub struct PostgresDb {}

 impl PostgresDb {

@@ -553,26 +569,19 @@ CREATE TABLE IF NOT EXISTS meta
     }

     // create table index
-    create_index_item("CREATE INDEX IF NOT EXISTS meta_module_idx on meta (module);").await?;
-    create_index_item("CREATE INDEX IF NOT EXISTS meta_module_key1_idx on meta (module, key1);")
-        .await?;
-    create_index_item(
-        "CREATE UNIQUE INDEX IF NOT EXISTS meta_module_start_dt_idx on meta (module, key1, key2, start_dt);",
+    create_index("meta_module_idx", "meta", false, &["module"]).await?;
+    create_index("meta_module_key1_idx", "meta", false, &["module", "key1"]).await?;
+    create_index(
+        "meta_module_start_dt_idx",
+        "meta",
+        true,
+        &["module", "key1", "key2", "start_dt"],
     )
     .await?;

     Ok(())
 }

-async fn create_index_item(sql: &str) -> Result<()> {
-    let pool = CLIENT.clone();
-    if let Err(e) = sqlx::query(sql).execute(&pool).await {
-        log::error!("[POSTGRES] create table meta index error: {}", e);
-        return Err(e.into());
-    }
-    Ok(())
-}
-
 async fn add_start_dt_column() -> Result<()> {
     let pool = CLIENT.clone();
     let mut tx = pool.begin().await?;

@@ -588,37 +597,18 @@ async fn add_start_dt_column() -> Result<()> {
         }
         return Err(e.into());
     }
+    tx.commit().await?;

     // Proceed to drop the index if it exists and create a new one if it does not exist
-    if let Err(e) = sqlx::query(
-        r#"CREATE UNIQUE INDEX IF NOT EXISTS meta_module_start_dt_idx ON meta (module, key1, key2, start_dt);"#
+    create_index(
+        "meta_module_start_dt_idx",
+        "meta",
+        true,
+        &["module", "key1", "key2", "start_dt"],
     )
-    .execute(&mut *tx)
-    .await {
-        log::error!("[POSTGRES] Error in adding index meta_module_start_dt_idx: {}", e);
-        if let Err(e) = tx.rollback().await {
-            log::error!("[POSTGRES] Error in rolling back transaction: {}", e);
-        }
-        return Err(e.into());
-    }
-    if let Err(e) = sqlx::query(r#"DROP INDEX IF EXISTS meta_module_key2_idx;"#)
-        .execute(&mut *tx)
-        .await
-    {
-        log::error!(
-            "[POSTGRES] Error in dropping index meta_module_key2_idx: {}",
-            e
-        );
-        if let Err(e) = tx.rollback().await {
-            log::error!("[POSTGRES] Error in rolling back transaction: {}", e);
-        }
-        return Err(e.into());
-    }
+    .await?;
+    delete_index("meta_module_key2_idx", "meta").await?;

-    if let Err(e) = tx.commit().await {
-        log::info!("[POSTGRES] Error in committing transaction: {}", e);
-        return Err(e.into());
-    };
     Ok(())
 }

@@ -647,3 +637,47 @@ async fn create_meta_backup() -> Result<()> {
     }
     Ok(())
 }
+
+pub async fn create_index(
+    idx_name: &str,
+    table: &str,
+    unique: bool,
+    fields: &[&str],
+) -> Result<()> {
+    let client = CLIENT.clone();
+    let indices = INDICES.get_or_init(cache_indices).await;
+    if indices.contains(&DBIndex {
+        name: idx_name.into(),
+        table: table.into(),
+    }) {
+        return Ok(());
+    }
+    let unique_str = if unique { "UNIQUE" } else { "" };
+    log::info!("[POSTGRES] creating index {} on table {}", idx_name, table);
+    let sql = format!(
+        "CREATE {} INDEX IF NOT EXISTS {} ON {} ({});",
+        unique_str,
+        idx_name,
+        table,
+        fields.join(",")
+    );
+    sqlx::query(&sql).execute(&client).await?;
+    log::info!("[POSTGRES] index {} created successfully", idx_name);
+    Ok(())
+}
+
+pub async fn delete_index(idx_name: &str, table: &str) -> Result<()> {
+    let client = CLIENT.clone();
+    let indices = INDICES.get_or_init(cache_indices).await;
+    if !indices.contains(&DBIndex {
+        name: idx_name.into(),
+        table: table.into(),
+    }) {
+        return Ok(());
+    }
+    log::info!("[POSTGRES] deleting index {} on table {}", idx_name, table);
+    let sql = format!("DROP INDEX IF EXISTS {};", idx_name);
+    sqlx::query(&sql).execute(&client).await?;
+    log::info!("[POSTGRES] index {} deleted successfully", idx_name);
+    Ok(())
+}

@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-use std::{str::FromStr, sync::Arc, time::Duration};
+use std::{collections::HashSet, str::FromStr, sync::Arc, time::Duration};

 use async_trait::async_trait;
 use bytes::Bytes;

@@ -27,8 +27,9 @@ use sqlx::{
     },
     Pool, Sqlite,
 };
-use tokio::sync::{mpsc, Mutex, RwLock};
+use tokio::sync::{mpsc, Mutex, OnceCell, RwLock};

+use super::DBIndex;
 use crate::{
     db::{Event, EventData},
     errors::*,

@@ -37,6 +38,8 @@ use crate::{
 pub static CLIENT_RO: Lazy<Pool<Sqlite>> = Lazy::new(connect_ro);
 pub static CLIENT_RW: Lazy<Arc<Mutex<Pool<Sqlite>>>> =
     Lazy::new(|| Arc::new(Mutex::new(connect_rw())));
+static INDICES: OnceCell<HashSet<DBIndex>> = OnceCell::const_new();

 pub static CHANNEL: Lazy<SqliteDbChannel> = Lazy::new(SqliteDbChannel::new);

 static WATCHERS: Lazy<RwLock<FxIndexMap<String, EventChannel>>> =

@@ -96,6 +99,21 @@ fn connect_ro() -> Pool<Sqlite> {
         .connect_lazy_with(db_opts)
 }

+async fn cache_indices() -> HashSet<DBIndex> {
+    let client = CLIENT_RO.clone();
+    let sql = r#"SELECT name,tbl_name FROM sqlite_master where type = 'index';"#;
+    let res = sqlx::query_as::<_, (String, String)>(sql)
+        .fetch_all(&client)
+        .await;
+    match res {
+        Ok(r) => r
+            .into_iter()
+            .map(|(name, table)| DBIndex { name, table })
+            .collect(),
+        Err(_) => HashSet::new(),
+    }
+}
+
 pub struct SqliteDbChannel {
     pub watch_tx: EventChannel,
 }

@@ -674,10 +692,8 @@ impl super::Db for SqliteDb {
     }

     async fn add_start_dt_column(&self) -> Result<()> {
-        let client = CLIENT_RW.clone();
-        let client = client.lock().await;
-        create_meta_backup(&client).await?;
-        add_start_dt_column(&client).await?;
+        create_meta_backup().await?;
+        add_start_dt_column().await?;
         Ok(())
     }
 }

@@ -701,44 +717,32 @@ CREATE TABLE IF NOT EXISTS meta
     )
     .execute(&*client)
     .await?;
+    drop(client);

     // create start_dt column for old version <= 0.9.2
-    add_start_dt_column(&client).await?;
+    add_start_dt_column().await?;

     // create table index
-    sqlx::query(
-        r#"
-CREATE INDEX IF NOT EXISTS meta_module_idx on meta (module);
-CREATE INDEX IF NOT EXISTS meta_module_key1_idx on meta (module, key1);
-    "#,
-    )
-    .execute(&*client)
-    .await?;
-
-    match sqlx::query(
-        r#"
-CREATE UNIQUE INDEX IF NOT EXISTS meta_module_start_dt_idx on meta (module, key1, key2, start_dt);
-    "#,
-    )
-    .execute(&*client)
-    .await
-    {
-        Ok(_) => {}
-        Err(e) => {
-            log::error!(
-                "[SQLITE] create meta_module_start_dt_idx index error: {}",
-                e
-            );
-        }
-    }
+    create_index("meta_module_idx", "meta", false, &["module"]).await?;
+    create_index("meta_module_key1_idx", "meta", false, &["module", "key1"]).await?;
+    create_index(
+        "meta_module_start_dt_idx",
+        "meta",
+        true,
+        &["module", "key1", "key2", "start_dt"],
+    )
+    .await?;
     Ok(())
 }

-async fn add_start_dt_column(client: &Pool<Sqlite>) -> Result<()> {
+async fn add_start_dt_column() -> Result<()> {
+    let client = CLIENT_RW.clone();
+    let client = client.lock().await;
     // Attempt to add the column, ignoring the error if the column already exists
     if let Err(e) =
         sqlx::query(r#"ALTER TABLE meta ADD COLUMN start_dt INTEGER NOT NULL DEFAULT 0;"#)
-            .execute(client)
+            .execute(&*client)
             .await
     {
         // Check if the error is about the duplicate column

@@ -747,21 +751,23 @@ async fn add_start_dt_column(client: &Pool<Sqlite>) -> Result<()> {
             return Err(e.into());
         }
     }
+    drop(client);

     // Proceed to drop the index if it exists and create a new one if it does not exist
-    sqlx::query(
-        r#"CREATE UNIQUE INDEX IF NOT EXISTS meta_module_start_dt_idx ON meta (module, key1, key2, start_dt);"#
-    )
-    .execute(client)
-    .await?;
-    sqlx::query(r#"DROP INDEX IF EXISTS meta_module_key2_idx;"#)
-        .execute(client)
-        .await?;
-
+    create_index(
+        "meta_module_start_dt_idx",
+        "meta",
+        true,
+        &["module", "key1", "key2", "start_dt"],
+    )
+    .await?;
+    delete_index("meta_module_key2_idx", "meta").await?;
     Ok(())
 }

-async fn create_meta_backup(client: &Pool<Sqlite>) -> Result<()> {
+async fn create_meta_backup() -> Result<()> {
+    let client = CLIENT_RW.clone();
+    let client = client.lock().await;
     let mut tx = client.begin().await?;
     if let Err(e) =
         sqlx::query(r#"CREATE TABLE IF NOT EXISTS meta_backup_20240330 AS SELECT * FROM meta;"#)

@@ -785,3 +791,49 @@ async fn create_meta_backup(client: &Pool<Sqlite>) -> Result<()> {
     }
     Ok(())
 }
+
+pub async fn create_index(
+    idx_name: &str,
+    table: &str,
+    unique: bool,
+    fields: &[&str],
+) -> Result<()> {
+    let client = CLIENT_RW.clone();
+    let client = client.lock().await;
+    let indices = INDICES.get_or_init(cache_indices).await;
+    if indices.contains(&DBIndex {
+        name: idx_name.into(),
+        table: table.into(),
+    }) {
+        return Ok(());
+    }
+    let unique_str = if unique { "UNIQUE" } else { "" };
+    log::info!("[SQLITE] creating index {} on table {}", idx_name, table);
+    let sql = format!(
+        "CREATE {} INDEX IF NOT EXISTS {} ON {} ({});",
+        unique_str,
+        idx_name,
+        table,
+        fields.join(",")
+    );
+    sqlx::query(&sql).execute(&*client).await?;
+    log::info!("[SQLITE] index {} created successfully", idx_name);
+    Ok(())
+}
+
+pub async fn delete_index(idx_name: &str, table: &str) -> Result<()> {
+    let client = CLIENT_RW.clone();
+    let client = client.lock().await;
+    let indices = INDICES.get_or_init(cache_indices).await;
+    if !indices.contains(&DBIndex {
+        name: idx_name.into(),
+        table: table.into(),
+    }) {
+        return Ok(());
+    }
+    log::info!("[SQLITE] deleting index {} on table {}", idx_name, table);
+    let sql = format!("DROP INDEX IF EXISTS {};", idx_name);
+    sqlx::query(&sql).execute(&*client).await?;
+    log::info!("[SQLITE] index {} deleted successfully", idx_name);
+    Ok(())
+}

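Note: one subtlety in the SQLite variant is that `create_index` and `delete_index` take the `CLIENT_RW` lock themselves, which is why the callers above now release their own guard first (the added `drop(client)`). Tokio's `Mutex` is not reentrant, so re-locking from the same task while a guard is alive deadlocks. A small sketch of the hazard (names are illustrative):

```rust
use std::sync::Arc;
use tokio::sync::Mutex;

async fn create_index(db: &Arc<Mutex<Vec<String>>>, name: &str) {
    let mut guard = db.lock().await; // takes the lock itself
    guard.push(name.to_string());
}

#[tokio::main]
async fn main() {
    let db = Arc::new(Mutex::new(Vec::new()));

    let guard = db.lock().await; // caller holds the write lock
    println!("indices so far: {}", guard.len());
    drop(guard); // must release before calling a function that re-locks,
                 // otherwise this task awaits a lock it already holds
    create_index(&db, "meta_module_idx").await;
}
```
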
@@ -25,7 +25,7 @@ use hashbrown::HashMap;
 use sqlx::{Executor, MySql, QueryBuilder, Row};

 use crate::{
-    db::mysql::CLIENT,
+    db::mysql::{create_index, CLIENT},
     errors::{DbError, Error, Result},
 };

@@ -1192,99 +1192,109 @@ CREATE TABLE IF NOT EXISTS stream_stats

 pub async fn create_table_index() -> Result<()> {
     let pool = CLIENT.clone();
-    let sqls = vec![
-        (
-            "file_list",
-            "CREATE INDEX file_list_org_idx on file_list (org);",
-        ),
-        (
-            "file_list",
-            "CREATE INDEX file_list_stream_ts_idx on file_list (stream, max_ts, min_ts);",
-        ),
-        (
-            "file_list_history",
-            "CREATE INDEX file_list_history_org_idx on file_list_history (org);",
-        ),
-        (
-            "file_list_history",
-            "CREATE INDEX file_list_history_stream_ts_idx on file_list_history (stream, max_ts, min_ts);",
-        ),
-        (
-            "file_list_history",
-            "CREATE UNIQUE INDEX file_list_history_stream_file_idx on file_list_history (stream, date, file);",
-        ),
-        (
-            "file_list_deleted",
-            "CREATE INDEX file_list_deleted_created_at_idx on file_list_deleted (org, created_at);",
-        ),
-        (
-            "file_list_deleted",
-            "CREATE INDEX file_list_deleted_stream_date_file_idx on file_list_deleted (stream, date, file);",
-        ),
-        (
-            "file_list_jobs",
-            "CREATE UNIQUE INDEX file_list_jobs_stream_offsets_idx on file_list_jobs (stream, offsets);",
-        ),
-        (
-            "file_list_jobs",
-            "CREATE INDEX file_list_jobs_stream_status_idx on file_list_jobs (status, stream);",
-        ),
-        (
-            "stream_stats",
-            "CREATE INDEX stream_stats_org_idx on stream_stats (org);",
-        ),
-        (
-            "stream_stats",
-            "CREATE UNIQUE INDEX stream_stats_stream_idx on stream_stats (stream);",
-        ),
-    ];
-    for (table, sql) in sqls {
-        if let Err(e) = sqlx::query(sql).execute(&pool).await {
-            if e.to_string().contains("Duplicate key") {
-                // index already exists
-                continue;
-            }
-            log::error!("[MYSQL] create table {} index error: {}", table, e);
-            return Err(e.into());
-        }
-    }
-
-    // create UNIQUE index for file_list
-    let unique_index_sql =
-        r#"CREATE UNIQUE INDEX file_list_stream_file_idx on file_list (stream, date, file);"#;
-    if let Err(e) = sqlx::query(unique_index_sql).execute(&pool).await {
-        if e.to_string().contains("Duplicate key") {
-            return Ok(()); // index already exists
-        } else if e.to_string().contains("Duplicate entry") {
-            log::warn!("[MYSQL] starting delete duplicate records");
-            // delete duplicate records
-            let ret = sqlx::query(
-                r#"SELECT stream, date, file, min(id) as id FROM file_list GROUP BY stream, date, file HAVING COUNT(*) > 1;"#,
-            ).fetch_all(&pool).await?;
-            log::warn!("[MYSQL] total: {} duplicate records", ret.len());
-            for (i, r) in ret.iter().enumerate() {
-                let stream = r.get::<String, &str>("stream");
-                let date = r.get::<String, &str>("date");
-                let file = r.get::<String, &str>("file");
-                let id = r.get::<i64, &str>("id");
-                sqlx::query(
-                    r#"DELETE FROM file_list WHERE id != ? AND stream = ? AND date = ? AND file = ?;"#,
-                ).bind(id).bind(stream).bind(date).bind(file).execute(&pool).await?;
-                if i / 1000 == 0 {
-                    log::warn!("[MYSQL] delete duplicate records: {}/{}", i, ret.len());
-                }
-            }
-            log::warn!(
-                "[MYSQL] delete duplicate records: {}/{}",
-                ret.len(),
-                ret.len()
-            );
-            // create index again
-            sqlx::query(unique_index_sql).execute(&pool).await?;
-            log::warn!("[MYSQL] create table index(file_list_stream_file_idx) succeed");
-        } else {
-            return Err(e.into());
-        }
-    }
+    let indices: Vec<(&str, &str, &[&str])> = vec![
+        ("file_list_org_idx", "file_list", &["org"]),
+        (
+            "file_list_stream_ts_idx",
+            "file_list",
+            &["stream", "max_ts", "min_ts"],
+        ),
+        ("file_list_history_org_idx", "file_list_history", &["org"]),
+        (
+            "file_list_history_stream_ts_idx",
+            "file_list_history",
+            &["stream", "max_ts", "min_ts"],
+        ),
+        (
+            "file_list_deleted_created_at_idx",
+            "file_list_deleted",
+            &["org", "created_at"],
+        ),
+        (
+            "file_list_deleted_stream_date_file_idx",
+            "file_list_deleted",
+            &["stream", "date", "file"],
+        ),
+        (
+            "file_list_jobs_stream_status_idx",
+            "file_list_jobs",
+            &["status", "stream"],
+        ),
+        ("stream_stats_org_idx", "stream_stats", &["org"]),
+    ];
+    for (idx, table, fields) in indices {
+        create_index(idx, table, false, fields).await?;
+    }
+
+    let unique_indices: Vec<(&str, &str, &[&str])> = vec![
+        (
+            "file_list_history_stream_file_idx",
+            "file_list_history",
+            &["stream", "date", "file"],
+        ),
+        (
+            "file_list_jobs_stream_offsets_idx",
+            "file_list_jobs",
+            &["stream", "offsets"],
+        ),
+        ("stream_stats_stream_idx", "stream_stats", &["stream"]),
+    ];
+    for (idx, table, fields) in unique_indices {
+        create_index(idx, table, true, fields).await?;
+    }
+
+    // This is special case where we want to MAKE the index unique if it is not
+    let res = create_index(
+        "file_list_stream_file_idx",
+        "file_list",
+        true,
+        &["stream", "date", "file"],
+    )
+    .await;
+    if let Err(e) = res {
+        if !e.to_string().contains("Duplicate entry") {
+            return Err(e);
+        }
+        log::warn!("[MYSQL] starting delete duplicate records");
+        // delete duplicate records
+        let ret = sqlx::query(
+            r#"SELECT stream, date, file, min(id) as id FROM file_list GROUP BY stream, date, file HAVING COUNT(*) > 1;"#,
+        ).fetch_all(&pool).await?;
+        log::warn!("[MYSQL] total: {} duplicate records", ret.len());
+        for (i, r) in ret.iter().enumerate() {
+            let stream = r.get::<String, &str>("stream");
+            let date = r.get::<String, &str>("date");
+            let file = r.get::<String, &str>("file");
+            let id = r.get::<i64, &str>("id");
+            sqlx::query(
+                r#"DELETE FROM file_list WHERE id != ? AND stream = ? AND date = ? AND file = ?;"#,
+            )
+            .bind(id)
+            .bind(stream)
+            .bind(date)
+            .bind(file)
+            .execute(&pool)
+            .await?;
+            if i % 1000 == 0 {
+                log::warn!("[MYSQL] delete duplicate records: {}/{}", i, ret.len());
+            }
+        }
+        log::warn!(
+            "[MYSQL] delete duplicate records: {}/{}",
+            ret.len(),
+            ret.len()
+        );
+        // create index again
+        create_index(
+            "file_list_stream_file_idx",
+            "file_list",
+            true,
+            &["stream", "date", "file"],
+        )
+        .await?;
+        log::warn!("[MYSQL] create table index(file_list_stream_file_idx) succeed");
+    }

     Ok(())

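Note: promoting `file_list_stream_file_idx` to UNIQUE fails when duplicate rows already exist, so the recovery path keeps only the row with the smallest id per (stream, date, file) and retries. The keep-min-id rule, sketched over in-memory rows instead of SQL (illustrative only):

```rust
use std::collections::HashSet;

#[derive(Debug)]
struct Row {
    id: i64,
    stream: String,
    date: String,
    file: String,
}

// Keep only the lowest-id row per (stream, date, file), mirroring the
// `min(id)` + `DELETE ... WHERE id != ?` recovery in the diff.
fn dedup(mut rows: Vec<Row>) -> Vec<Row> {
    rows.sort_by_key(|r| r.id); // survivors are the smallest ids
    let mut seen = HashSet::new();
    rows.retain(|r| seen.insert((r.stream.clone(), r.date.clone(), r.file.clone())));
    rows
}

fn main() {
    let rows = vec![
        Row { id: 7, stream: "logs/k8s".into(), date: "2024-03-30".into(), file: "a.parquet".into() },
        Row { id: 3, stream: "logs/k8s".into(), date: "2024-03-30".into(), file: "a.parquet".into() },
    ];
    let rows = dedup(rows);
    assert_eq!(rows.len(), 1);
    assert_eq!(rows[0].id, 3); // the duplicate with the larger id was dropped
}
```
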
@@ -25,7 +25,7 @@ use hashbrown::HashMap;
 use sqlx::{Executor, Postgres, QueryBuilder, Row};

 use crate::{
-    db::postgres::CLIENT,
+    db::postgres::{create_index, CLIENT},
     errors::{Error, Result},
 };

@@ -1169,64 +1169,69 @@ CREATE TABLE IF NOT EXISTS stream_stats

 pub async fn create_table_index() -> Result<()> {
     let pool = CLIENT.clone();
-    let sqls = vec![
-        (
-            "file_list",
-            "CREATE INDEX IF NOT EXISTS file_list_org_idx on file_list (org);",
-        ),
-        (
-            "file_list",
-            "CREATE INDEX IF NOT EXISTS file_list_stream_ts_idx on file_list (stream, max_ts, min_ts);",
-        ),
-        (
-            "file_list_history",
-            "CREATE INDEX IF NOT EXISTS file_list_history_org_idx on file_list_history (org);",
-        ),
-        (
-            "file_list_history",
-            "CREATE INDEX IF NOT EXISTS file_list_history_stream_ts_idx on file_list_history (stream, max_ts, min_ts);",
-        ),
-        (
-            "file_list_history",
-            "CREATE UNIQUE INDEX IF NOT EXISTS file_list_history_stream_file_idx on file_list_history (stream, date, file);",
-        ),
-        (
-            "file_list_deleted",
-            "CREATE INDEX IF NOT EXISTS file_list_deleted_created_at_idx on file_list_deleted (org, created_at);",
-        ),
-        (
-            "file_list_deleted",
-            "CREATE INDEX IF NOT EXISTS file_list_deleted_stream_date_file_idx on file_list_deleted (stream, date, file);",
-        ),
-        (
-            "file_list_jobs",
-            "CREATE UNIQUE INDEX IF NOT EXISTS file_list_jobs_stream_offsets_idx on file_list_jobs (stream, offsets);",
-        ),
-        (
-            "file_list_jobs",
-            "CREATE INDEX IF NOT EXISTS file_list_jobs_stream_status_idx on file_list_jobs (status, stream);",
-        ),
-        (
-            "stream_stats",
-            "CREATE INDEX IF NOT EXISTS stream_stats_org_idx on stream_stats (org);",
-        ),
-        (
-            "stream_stats",
-            "CREATE UNIQUE INDEX IF NOT EXISTS stream_stats_stream_idx on stream_stats (stream);",
-        ),
-    ];
-    for (table, sql) in sqls {
-        if let Err(e) = sqlx::query(sql).execute(&pool).await {
-            log::error!("[POSTGRES] create table {} index error: {}", table, e);
-            return Err(e.into());
-        }
-    }
-
-    // create UNIQUE index for file_list
-    let unique_index_sql = r#"CREATE UNIQUE INDEX IF NOT EXISTS file_list_stream_file_idx on file_list (stream, date, file);"#;
-    if let Err(e) = sqlx::query(unique_index_sql).execute(&pool).await {
+    let indices: Vec<(&str, &str, &[&str])> = vec![
+        ("file_list_org_idx", "file_list", &["org"]),
+        (
+            "file_list_stream_ts_idx",
+            "file_list",
+            &["stream", "max_ts", "min_ts"],
+        ),
+        ("file_list_history_org_idx", "file_list_history", &["org"]),
+        (
+            "file_list_history_stream_ts_idx",
+            "file_list_history",
+            &["stream", "max_ts", "min_ts"],
+        ),
+        (
+            "file_list_deleted_created_at_idx",
+            "file_list_deleted",
+            &["org", "created_at"],
+        ),
+        (
+            "file_list_deleted_stream_date_file_idx",
+            "file_list_deleted",
+            &["stream", "date", "file"],
+        ),
+        (
+            "file_list_jobs_stream_status_idx",
+            "file_list_jobs",
+            &["status", "stream"],
+        ),
+        ("stream_stats_org_idx", "stream_stats", &["org"]),
+    ];
+    for (idx, table, fields) in indices {
+        create_index(idx, table, false, fields).await?;
+    }
+
+    let unique_indices: Vec<(&str, &str, &[&str])> = vec![
+        (
+            "file_list_history_stream_file_idx",
+            "file_list_history",
+            &["stream", "date", "file"],
+        ),
+        (
+            "file_list_jobs_stream_offsets_idx",
+            "file_list_jobs",
+            &["stream", "offsets"],
+        ),
+        ("stream_stats_stream_idx", "stream_stats", &["stream"]),
+    ];
+    for (idx, table, fields) in unique_indices {
+        create_index(idx, table, true, fields).await?;
+    }
+
+    // This is a case where we want to MAKE the index unique if it isn't
+    let res = create_index(
+        "file_list_stream_file_idx",
+        "file_list",
+        true,
+        &["stream", "date", "file"],
+    )
+    .await;
+    if let Err(e) = res {
         if !e.to_string().contains("could not create unique index") {
-            return Err(e.into());
+            return Err(e);
         }
         // delete duplicate records
         log::warn!("[POSTGRES] starting delete duplicate records");

@@ -1250,7 +1255,7 @@ pub async fn create_table_index() -> Result<()> {
             .bind(date)
             .bind(file)
             .execute(&pool).await?;
-        if i / 1000 == 0 {
+        if i % 1000 == 0 {
             log::warn!("[POSTGRES] delete duplicate records: {}/{}", i, ret.len());
         }
     }

@@ -1260,7 +1265,13 @@ pub async fn create_table_index() -> Result<()> {
         ret.len()
     );
     // create index again
-    sqlx::query(unique_index_sql).execute(&pool).await?;
+    create_index(
+        "file_list_stream_file_idx",
+        "file_list",
+        true,
+        &["stream", "date", "file"],
+    )
+    .await?;
     log::warn!("[POSTGRES] create table index(file_list_stream_file_idx) succeed");
 }

@@ -25,7 +25,7 @@ use hashbrown::HashMap;
 use sqlx::{Executor, Pool, QueryBuilder, Row, Sqlite};

 use crate::{
-    db::sqlite::{CLIENT_RO, CLIENT_RW},
+    db::sqlite::{create_index, CLIENT_RO, CLIENT_RW},
     errors::{Error, Result},
 };

@@ -1156,70 +1156,74 @@ CREATE TABLE IF NOT EXISTS stream_stats
 }

 pub async fn create_table_index() -> Result<()> {
-    let sqls = vec![
-        (
-            "file_list",
-            "CREATE INDEX IF NOT EXISTS file_list_org_idx on file_list (org);",
-        ),
-        (
-            "file_list",
-            "CREATE INDEX IF NOT EXISTS file_list_stream_ts_idx on file_list (stream, max_ts, min_ts);",
-        ),
-        (
-            "file_list_history",
-            "CREATE INDEX IF NOT EXISTS file_list_history_org_idx on file_list_history (org);",
-        ),
-        (
-            "file_list_history",
-            "CREATE INDEX IF NOT EXISTS file_list_history_stream_ts_idx on file_list_history (stream, max_ts, min_ts);",
-        ),
-        (
-            "file_list_history",
-            "CREATE UNIQUE INDEX IF NOT EXISTS file_list_history_stream_file_idx on file_list_history (stream, date, file);",
-        ),
-        (
-            "file_list_deleted",
-            "CREATE INDEX IF NOT EXISTS file_list_deleted_created_at_idx on file_list_deleted (org, created_at);",
-        ),
-        (
-            "file_list_deleted",
-            "CREATE INDEX IF NOT EXISTS file_list_deleted_stream_date_file_idx on file_list_deleted (stream, date, file);",
-        ),
-        (
-            "file_list_jobs",
-            "CREATE UNIQUE INDEX IF NOT EXISTS file_list_jobs_stream_offsets_idx on file_list_jobs (stream, offsets);",
-        ),
-        (
-            "file_list_jobs",
-            "CREATE INDEX IF NOT EXISTS file_list_jobs_stream_status_idx on file_list_jobs (status, stream);",
-        ),
-        (
-            "stream_stats",
-            "CREATE INDEX IF NOT EXISTS stream_stats_org_idx on stream_stats (org);",
-        ),
-        (
-            "stream_stats",
-            "CREATE UNIQUE INDEX IF NOT EXISTS stream_stats_stream_idx on stream_stats (stream);",
-        ),
-    ];
-
-    let client = CLIENT_RW.clone();
-    let client = client.lock().await;
-    for (table, sql) in sqls {
-        if let Err(e) = sqlx::query(sql).execute(&*client).await {
-            log::error!("[SQLITE] create table {} index error: {}", table, e);
-            return Err(e.into());
-        }
-    }
-
-    // create UNIQUE index for file_list
-    let unique_index_sql = r#"CREATE UNIQUE INDEX IF NOT EXISTS file_list_stream_file_idx on file_list (stream, date, file);"#;
-    if let Err(e) = sqlx::query(unique_index_sql).execute(&*client).await {
+    let indices: Vec<(&str, &str, &[&str])> = vec![
+        ("file_list_org_idx", "file_list", &["org"]),
+        (
+            "file_list_stream_ts_idx",
+            "file_list",
+            &["stream", "max_ts", "min_ts"],
+        ),
+        ("file_list_history_org_idx", "file_list_history", &["org"]),
+        (
+            "file_list_history_stream_ts_idx",
+            "file_list_history",
+            &["stream", "max_ts", "min_ts"],
+        ),
+        (
+            "file_list_deleted_created_at_idx",
+            "file_list_deleted",
+            &["org", "created_at"],
+        ),
+        (
+            "file_list_deleted_stream_date_file_idx",
+            "file_list_deleted",
+            &["stream", "date", "file"],
+        ),
+        (
+            "file_list_jobs_stream_status_idx",
+            "file_list_jobs",
+            &["status", "stream"],
+        ),
+        ("stream_stats_org_idx", "stream_stats", &["org"]),
+    ];
+    for (idx, table, fields) in indices {
+        create_index(idx, table, false, fields).await?;
+    }
+
+    let unique_indices: Vec<(&str, &str, &[&str])> = vec![
+        (
+            "file_list_history_stream_file_idx",
+            "file_list_history",
+            &["stream", "date", "file"],
+        ),
+        (
+            "file_list_jobs_stream_offsets_idx",
+            "file_list_jobs",
+            &["stream", "offsets"],
+        ),
+        ("stream_stats_stream_idx", "stream_stats", &["stream"]),
+    ];
+    for (idx, table, fields) in unique_indices {
+        create_index(idx, table, true, fields).await?;
+    }
+
+    // This is a case where we want to MAKE the index unique
+    let res = create_index(
+        "file_list_stream_file_idx",
+        "file_list",
+        true,
+        &["stream", "date", "file"],
+    )
+    .await;
+    if let Err(e) = res {
         if !e.to_string().contains("UNIQUE constraint failed") {
-            return Err(e.into());
+            return Err(e);
         }
         // delete duplicate records
         log::warn!("[SQLITE] starting delete duplicate records");
+        let client = CLIENT_RW.clone();
+        let client = client.lock().await;
         let ret = sqlx::query(
             r#"SELECT stream, date, file, min(id) as id FROM file_list GROUP BY stream, date, file HAVING COUNT(*) > 1;"#,
         ).fetch_all(&*client).await?;

@@ -1232,22 +1236,31 @@ pub async fn create_table_index() -> Result<()> {
             sqlx::query(
                 r#"DELETE FROM file_list WHERE id != $1 AND stream = $2 AND date = $3 AND file = $4;"#,
             ).bind(id).bind(stream).bind(date).bind(file).execute(&*client).await?;
-            if i / 1000 == 0 {
+            if i % 1000 == 0 {
                 log::warn!("[SQLITE] delete duplicate records: {}/{}", i, ret.len());
             }
         }
+        drop(client);
         log::warn!(
             "[SQLITE] delete duplicate records: {}/{}",
             ret.len(),
             ret.len()
         );
         // create index again
-        sqlx::query(unique_index_sql).execute(&*client).await?;
+        create_index(
+            "file_list_stream_file_idx",
+            "file_list",
+            true,
+            &["stream", "date", "file"],
+        )
+        .await?;
         log::warn!("[SQLITE] create table index(file_list_stream_file_idx) succeed");
     }

     // delete trigger for old version
     // compatible for old version <= 0.6.4
+    let client = CLIENT_RW.clone();
+    let client = client.lock().await;
     sqlx::query(r#"DROP TRIGGER IF EXISTS update_stream_stats_delete;"#)
         .execute(&*client)
         .await?;

@@ -20,7 +20,10 @@ use sqlx::Row;

 use super::{Trigger, TriggerId, TriggerModule, TriggerStatus, TRIGGERS_KEY};
 use crate::{
-    db::{self, mysql::CLIENT},
+    db::{
+        self,
+        mysql::{create_index, CLIENT},
+    },
     errors::{DbError, Error, Result},
 };

@@ -77,24 +80,28 @@ CREATE TABLE IF NOT EXISTS scheduled_jobs
     }

     async fn create_table_index(&self) -> Result<()> {
-        let pool = CLIENT.clone();
-
-        let queries = vec![
-            "CREATE INDEX scheduled_jobs_key_idx on scheduled_jobs (module_key);",
-            "CREATE INDEX scheduled_jobs_org_key_idx on scheduled_jobs (org, module_key);",
-            "CREATE UNIQUE INDEX scheduled_jobs_org_module_key_idx on scheduled_jobs (org, module, module_key);",
-        ];
-
-        for query in queries {
-            if let Err(e) = sqlx::query(query).execute(&pool).await {
-                if e.to_string().contains("Duplicate key") {
-                    // index already exists
-                    return Ok(());
-                }
-                log::error!("[MYSQL] create table scheduled_jobs index error: {}", e);
-                return Err(e.into());
-            }
-        }
+        create_index(
+            "scheduled_jobs_key_idx",
+            "scheduled_jobs",
+            false,
+            &["module_key"],
+        )
+        .await?;
+        create_index(
+            "scheduled_jobs_org_key_idx",
+            "scheduled_jobs",
+            false,
+            &["org", "module_key"],
+        )
+        .await?;
+        create_index(
+            "scheduled_jobs_org_module_key_idx",
+            "scheduled_jobs",
+            true,
+            &["org", "module", "module_key"],
+        )
+        .await?;
         Ok(())
     }

@@ -20,7 +20,10 @@ use sqlx::Row;

 use super::{Trigger, TriggerModule, TriggerStatus, TRIGGERS_KEY};
 use crate::{
-    db::{self, postgres::CLIENT},
+    db::{
+        self,
+        postgres::{create_index, CLIENT},
+    },
     errors::{DbError, Error, Result},
 };

@@ -77,20 +80,28 @@ CREATE TABLE IF NOT EXISTS scheduled_jobs
     }

     async fn create_table_index(&self) -> Result<()> {
-        let pool = CLIENT.clone();
-
-        let queries = vec![
-            "CREATE INDEX IF NOT EXISTS scheduled_jobs_key_idx on scheduled_jobs (module_key);",
-            "CREATE INDEX IF NOT EXISTS scheduled_jobs_org_key_idx on scheduled_jobs (org, module_key);",
-            "CREATE UNIQUE INDEX IF NOT EXISTS scheduled_jobs_org_module_key_idx on scheduled_jobs (org, module, module_key);",
-        ];
-
-        for query in queries {
-            if let Err(e) = sqlx::query(query).execute(&pool).await {
-                log::error!("[POSTGRES] create table scheduled_jobs index error: {}", e);
-                return Err(e.into());
-            }
-        }
+        create_index(
+            "scheduled_jobs_key_idx",
+            "scheduled_jobs",
+            false,
+            &["module_key"],
+        )
+        .await?;
+        create_index(
+            "scheduled_jobs_org_key_idx",
+            "scheduled_jobs",
+            false,
+            &["org", "module_key"],
+        )
+        .await?;
+        create_index(
+            "scheduled_jobs_org_module_key_idx",
+            "scheduled_jobs",
+            true,
+            &["org", "module", "module_key"],
+        )
+        .await?;
         Ok(())
     }

@@ -22,7 +22,7 @@ use super::{Trigger, TriggerModule, TriggerStatus, TRIGGERS_KEY};
 use crate::{
     db::{
         self,
-        sqlite::{CLIENT_RO, CLIENT_RW},
+        sqlite::{create_index, CLIENT_RO, CLIENT_RW},
     },
     errors::{DbError, Error, Result},
 };

@@ -76,17 +76,28 @@ CREATE TABLE IF NOT EXISTS scheduled_jobs
     }

     async fn create_table_index(&self) -> Result<()> {
-        let client = CLIENT_RW.clone();
-        let client = client.lock().await;
-        let queries = vec![
-            "CREATE INDEX IF NOT EXISTS scheduled_jobs_key_idx on scheduled_jobs (module_key);",
-            "CREATE INDEX IF NOT EXISTS scheduled_jobs_org_key_idx on scheduled_jobs (org, module_key);",
-            "CREATE UNIQUE INDEX IF NOT EXISTS scheduled_jobs_org_module_key_idx on scheduled_jobs (org, module, module_key);",
-        ];
-
-        for query in queries {
-            sqlx::query(query).execute(&*client).await?;
-        }
+        create_index(
+            "scheduled_jobs_key_idx",
+            "scheduled_jobs",
+            false,
+            &["module_key"],
+        )
+        .await?;
+        create_index(
+            "scheduled_jobs_org_key_idx",
+            "scheduled_jobs",
+            false,
+            &["org", "module_key"],
+        )
+        .await?;
+        create_index(
+            "scheduled_jobs_org_module_key_idx",
+            "scheduled_jobs",
+            true,
+            &["org", "module", "module_key"],
+        )
+        .await?;
         Ok(())
     }

@@ -18,7 +18,7 @@ use config::{meta::stream::StreamType, utils::json};
 use datafusion::arrow::datatypes::Schema;

 use crate::{
-    db::mysql::CLIENT,
+    db::mysql::{create_index, CLIENT},
     errors::{Error, Result},
 };

@@ -105,31 +105,21 @@ CREATE TABLE IF NOT EXISTS schema_history
 }

 pub async fn create_table_index() -> Result<()> {
-    let pool = CLIENT.clone();
-    let sqls = vec![
-        (
-            "schema_history",
-            "CREATE INDEX schema_history_org_idx on schema_history (org);",
-        ),
-        (
-            "schema_history",
-            "CREATE INDEX schema_history_stream_idx on schema_history (org, stream_type, stream_name);",
-        ),
-        (
-            "schema_history",
-            "CREATE UNIQUE INDEX schema_history_stream_version_idx on schema_history (org, stream_type, stream_name, start_dt);",
-        ),
-    ];
-    for (table, sql) in sqls {
-        if let Err(e) = sqlx::query(sql).execute(&pool).await {
-            if e.to_string().contains("Duplicate key") {
-                // index already exists
-                continue;
-            }
-            log::error!("[MYSQL] create table {} index error: {}", table, e);
-            return Err(e.into());
-        }
-    }
+    create_index("schema_history_org_idx", "schema_history", false, &["org"]).await?;
+    create_index(
+        "schema_history_stream_idx",
+        "schema_history",
+        false,
+        &["org", "stream_type", "stream_name"],
+    )
+    .await?;
+    create_index(
+        "schema_history_stream_version_idx",
+        "schema_history",
+        true,
+        &["org", "stream_type", "stream_name", "start_dt"],
+    )
+    .await?;

     Ok(())
 }

@@ -18,7 +18,7 @@ use config::{meta::stream::StreamType, utils::json};
 use datafusion::arrow::datatypes::Schema;
 
 use crate::{
-    db::postgres::CLIENT,
+    db::postgres::{create_index, CLIENT},
     errors::{Error, Result},
 };

@@ -106,27 +106,21 @@ CREATE TABLE IF NOT EXISTS schema_history
 }
 
 pub async fn create_table_index() -> Result<()> {
-    let pool = CLIENT.clone();
-    let sqls = vec![
-        (
-            "schema_history",
-            "CREATE INDEX IF NOT EXISTS schema_history_org_idx on schema_history (org);",
-        ),
-        (
-            "schema_history",
-            "CREATE INDEX IF NOT EXISTS schema_history_stream_idx on schema_history (org, stream_type, stream_name);",
-        ),
-        (
-            "schema_history",
-            "CREATE UNIQUE INDEX IF NOT EXISTS schema_history_stream_version_idx on schema_history (org, stream_type, stream_name, start_dt);",
-        ),
-    ];
-    for (table, sql) in sqls {
-        if let Err(e) = sqlx::query(sql).execute(&pool).await {
-            log::error!("[POSTGRES] create table {} index error: {}", table, e);
-            return Err(e.into());
-        }
-    }
+    create_index("schema_history_org_idx", "schema_history", false, &["org"]).await?;
+    create_index(
+        "schema_history_stream_idx",
+        "schema_history",
+        false,
+        &["org", "stream_type", "stream_name"],
+    )
+    .await?;
+    create_index(
+        "schema_history_stream_version_idx",
+        "schema_history",
+        true,
+        &["org", "stream_type", "stream_name", "start_dt"],
+    )
+    .await?;
 
     Ok(())
 }

@@ -18,7 +18,7 @@ use config::{meta::stream::StreamType, utils::json};
 use datafusion::arrow::datatypes::Schema;
 
 use crate::{
-    db::sqlite::CLIENT_RW,
+    db::sqlite::{create_index, CLIENT_RW},
     errors::{Error, Result},
 };

@@ -107,29 +107,21 @@ CREATE TABLE IF NOT EXISTS schema_history
 }
 
 pub async fn create_table_index() -> Result<()> {
-    let sqls = vec![
-        (
-            "schema_history",
-            "CREATE INDEX IF NOT EXISTS schema_history_org_idx on schema_history (org);",
-        ),
-        (
-            "schema_history",
-            "CREATE INDEX IF NOT EXISTS schema_history_stream_idx on schema_history (org, stream_type, stream_name);",
-        ),
-        (
-            "schema_history",
-            "CREATE UNIQUE INDEX IF NOT EXISTS schema_history_stream_version_idx on schema_history (org, stream_type, stream_name, start_dt);",
-        ),
-    ];
-
-    let client = CLIENT_RW.clone();
-    let client = client.lock().await;
-    for (table, sql) in sqls {
-        if let Err(e) = sqlx::query(sql).execute(&*client).await {
-            log::error!("[SQLITE] create table {} index error: {}", table, e);
-            return Err(e.into());
-        }
-    }
+    create_index("schema_history_org_idx", "schema_history", false, &["org"]).await?;
+    create_index(
+        "schema_history_stream_idx",
+        "schema_history",
+        false,
+        &["org", "stream_type", "stream_name"],
+    )
+    .await?;
+    create_index(
+        "schema_history_stream_version_idx",
+        "schema_history",
+        true,
+        &["org", "stream_type", "stream_name", "start_dt"],
+    )
+    .await?;
 
     Ok(())
 }

@@ -242,18 +242,50 @@ impl QueryCondition {
             }
         }
     };
-    if self.search_event_type.is_none() && resp.total < trigger_condition.threshold as usize {
+    let records: Option<Vec<Map<String, Value>>> = Some(
+        resp.hits
+            .iter()
+            .map(|hit| hit.as_object().unwrap().clone())
+            .collect(),
+    );
+    if self.search_event_type.is_none() {
+        let threshold = trigger_condition.threshold as usize;
+        match trigger_condition.operator {
+            Operator::EqualTo => {
+                if records.as_ref().unwrap().len() == threshold {
+                    return Ok((records, now));
+                }
+            }
+            Operator::NotEqualTo => {
+                if records.as_ref().unwrap().len() != threshold {
+                    return Ok((records, now));
+                }
+            }
+            Operator::GreaterThan => {
+                if records.as_ref().unwrap().len() > threshold {
+                    return Ok((records, now));
+                }
+            }
+            Operator::GreaterThanEquals => {
+                if records.as_ref().unwrap().len() >= threshold {
+                    return Ok((records, now));
+                }
+            }
+            Operator::LessThan => {
+                if records.as_ref().unwrap().len() < threshold {
+                    return Ok((records, now));
+                }
+            }
+            Operator::LessThanEquals => {
+                if records.as_ref().unwrap().len() <= threshold {
+                    return Ok((records, now));
+                }
+            }
+            _ => {}
+        }
         Ok((None, now))
     } else {
-        Ok((
-            Some(
-                resp.hits
-                    .iter()
-                    .map(|hit| hit.as_object().unwrap().clone())
-                    .collect(),
-            ),
-            now,
-        ))
+        Ok((records, now))
     }
 }

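The six operator arms differ only in the comparison applied to the hit count. As a side note, they could collapse into a single predicate; a sketch of that refactor (illustrative only, not part of this PR):

```rust
// Illustrative refactor sketch, not the PR's code.
fn threshold_met(op: &Operator, count: usize, threshold: usize) -> bool {
    match op {
        Operator::EqualTo => count == threshold,
        Operator::NotEqualTo => count != threshold,
        Operator::GreaterThan => count > threshold,
        Operator::GreaterThanEquals => count >= threshold,
        Operator::LessThan => count < threshold,
        Operator::LessThanEquals => count <= threshold,
        _ => false,
    }
}

// The call site above would then reduce to:
// if threshold_met(&trigger_condition.operator, records.as_ref().unwrap().len(), threshold) {
//     return Ok((records, now));
// }
```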
@@ -235,7 +235,17 @@ async fn handle_alert_triggers(trigger: db::scheduler::Trigger) -> Result<(), an
         // Check for the cron timestamp after the silence period
         new_trigger.next_run_at = schedule.after(&silence).next().unwrap().timestamp_micros();
     } else {
-        new_trigger.next_run_at += Duration::try_minutes(alert.trigger_condition.silence)
+        // When the silence period is less than the frequency, the alert runs after the silence
+        // period completely ignoring the frequency. So, if frequency is 60 mins and
+        // silence is 10 mins, the condition is satisfied, in that case, the alert
+        // will run after 10 mins of silence period. To avoid this scenario, we
+        // should use the max of (frequency, silence) as the next_run_at.
+        // Silence period is in minutes, and the frequency is in seconds.
+        let next_run_in_seconds = std::cmp::max(
+            alert.trigger_condition.silence * 60,
+            alert.trigger_condition.frequency,
+        );
+        new_trigger.next_run_at += Duration::try_seconds(next_run_in_seconds)
             .unwrap()
             .num_microseconds()
             .unwrap();

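A quick sanity check on that `max()` logic, with illustrative numbers:

```rust
// Illustrative numbers only.
let silence_minutes: i64 = 10;      // alert.trigger_condition.silence (minutes)
let frequency_seconds: i64 = 3600;  // alert.trigger_condition.frequency (seconds)
let next_run_in_seconds = std::cmp::max(silence_minutes * 60, frequency_seconds);
// Old behavior advanced next_run_at by only 600s; now the frequency wins.
assert_eq!(next_run_in_seconds, 3600);
```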
@@ -560,12 +560,20 @@ pub async fn merge_files(
     let mut deleted_files = Vec::new();
     let cfg = get_config();
     for file in files_with_size.iter() {
-        if new_file_size + file.meta.original_size > cfg.compact.max_file_size as i64
-            || new_compressed_file_size + file.meta.compressed_size
-                > cfg.compact.max_file_size as i64
-        {
+        let total_new_file_size = new_file_size + file.meta.original_size;
+        let total_new_compressed_file_size = new_compressed_file_size + file.meta.compressed_size;
+
+        if total_new_file_size > cfg.compact.max_file_size as i64 {
+            log::info!("[COMPACT:{thread_id}] new file size is bigger then compactor max file size: {}", total_new_file_size);
             break;
         }
+
+        if total_new_compressed_file_size > cfg.compact.max_file_size as i64 {
+            log::info!("[COMPACT:{thread_id}] new compressed file size is bigger then compactor max file size: {}", total_new_compressed_file_size);
+            break;
+        }
+
         new_file_size += file.meta.original_size;
         new_compressed_file_size += file.meta.compressed_size;
         total_records += file.meta.records;

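Splitting the combined condition makes each limit independently observable in the logs. A toy walk-through of the accumulation guard, with invented sizes:

```rust
// Toy illustration of the guard; sizes are made up.
let max_file_size: i64 = 100;
let sizes = [40, 40, 40];
let mut new_file_size: i64 = 0;
for s in sizes {
    let total = new_file_size + s;
    if total > max_file_size {
        // The third file would push the merged output to 120 > 100, so stop here.
        break;
    }
    new_file_size += s;
}
assert_eq!(new_file_size, 80); // only the first two files get merged
```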
@@ -202,7 +202,8 @@ async fn send_to_node(
         );
         break;
     }
-    let request = tonic::Request::new(req_query.clone());
+    let mut request = tonic::Request::new(req_query.clone());
+    request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
     match client.send_file_list(request).await {
         Ok(_) => break,
         Err(e) => {

@@ -241,7 +241,7 @@ async fn get_file_list(
         .parse()
         .map_err(|_| DataFusionError::Execution("invalid org_id".to_string()))?;
     let mut request = tonic::Request::new(req);
-    // request.set_timeout(Duration::from_secs(cfg.grpc.timeout));
+    request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
 
     opentelemetry::global::get_text_map_propagator(|propagator| {
         propagator.inject_context(

@@ -160,7 +160,7 @@ async fn search_in_cluster(
         .parse()
         .map_err(|_| Error::Message(format!("invalid org_id: {}", req.org_id)))?;
     let mut request = tonic::Request::new(req);
-    // request.set_timeout(Duration::from_secs(cfg.grpc.timeout));
+    request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
 
     opentelemetry::global::get_text_map_propagator(|propagator| {
         propagator.inject_context(

@@ -86,8 +86,8 @@ pub async fn get_cached_results(
         is_descending: cache_req.is_descending,
     };
 
-    let request = tonic::Request::new(req);
-
+    let mut request = tonic::Request::new(req);
+    request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
     log::info!(
         "[trace_id {trace_id}] get_cached_results->grpc: request node: {}",
         &node_addr

@@ -85,7 +85,8 @@ pub async fn get_cached_results(
         is_descending: cache_req.is_descending,
     };
 
-    let request = tonic::Request::new(req);
+    let mut request = tonic::Request::new(req);
+    request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
 
     log::info!(
         "[trace_id {trace_id}] get_cached_results->grpc: request node: {}",

@@ -432,6 +432,7 @@ pub async fn query_status() -> Result<search::QueryStatusResponse, Error> {
         async move {
             let cfg = get_config();
             let mut request = tonic::Request::new(proto::cluster_rpc::QueryStatusRequest {});
+            request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
 
             opentelemetry::global::get_text_map_propagator(|propagator| {
                 propagator.inject_context(
@@ -591,6 +592,7 @@ pub async fn cancel_query(
             let cfg = get_config();
             let mut request =
                 tonic::Request::new(proto::cluster_rpc::CancelQueryRequest { trace_id });
+            request.set_timeout(std::time::Duration::from_secs(cfg.limit.query_timeout));
             opentelemetry::global::get_text_map_propagator(|propagator| {
                 propagator.inject_context(
                     &tracing::Span::current().context(),

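All of these gRPC hunks apply the same pattern: making the `tonic::Request` mutable and calling its `set_timeout`, which attaches a `grpc-timeout` header that conforming servers honor, instead of leaving the call unbounded. A minimal self-contained sketch of the pattern; `EchoClient` and `EchoRequest` are hypothetical generated types used only for illustration:

```rust
use std::time::Duration;

// Hypothetical generated types for illustration; any tonic client works the same way.
// use proto::echo::{echo_client::EchoClient, EchoRequest};

async fn call_with_timeout(
    mut client: EchoClient<tonic::transport::Channel>,
    timeout_secs: u64,
) -> Result<(), tonic::Status> {
    // The request must be `mut` so a per-call timeout can be attached.
    let mut request = tonic::Request::new(EchoRequest::default());
    // set_timeout propagates a `grpc-timeout` header for this single call.
    request.set_timeout(Duration::from_secs(timeout_secs));
    client.echo(request).await?;
    Ok(())
}
```

Note the switch from the commented-out `cfg.grpc.timeout` to `cfg.limit.query_timeout`, so query-path RPCs share the same deadline as the query itself.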
@@ -1,6 +1,6 @@
 {
     "data": {
-        "organizationIdetifier": "default",
+        "organizationIdentifier": "default",
         "runQuery": false,
         "loading": false,
         "config": {

@@ -176,20 +176,20 @@ export function valueAddedInSqlMode() {
   });
 }
 
-//** Verify if add value feild is clicked*/
+//** Verify if add value field is clicked*/
 export function addFeildandSubValue() {
   cy.get(
     `[data-test="log-search-expand-${logData.addFieldAndSubFieldValueWithEqual.field}-field-btn"]`
   ).click({ force: true });
 }
 
-export function addsubFeildValue() {
+export function addsubFieldValue() {
   cy.get(
     `[data-test="logs-search-subfield-add-${logData.addFieldAndSubFieldValueWithEqual.field}-${logData.addFieldAndSubFieldValueWithEqual.subFieldValue}"] `
   ).trigger("mouseover", { force: true });
 }
 
-export function clickFeildSubvalue() {
+export function clickFieldSubvalue() {
   cy.get(
     `[data-test="logs-search-subfield-add-${logData.addFieldAndSubFieldValueWithEqual.field}-${logData.addFieldAndSubFieldValueWithEqual.subFieldValue}"] [data-test="log-search-subfield-list-equal-${logData.addFieldAndSubFieldValueWithEqual.field}-field-btn"]`
   ).click({ force: true });

@@ -444,7 +444,7 @@ describe("Create a new dashboard", () => {
         "have.value",
         ""
       );
-      // Asserion for the clear value in the x and y layout
+      // Assertion for the clear value in the x and y layout
       cy.get(
         `[data-test='dashboard-x-item-${dashboardData.customQueryValue.field1}']`
       ).should("not.exist");
@@ -492,7 +492,7 @@ describe("Create a new dashboard", () => {
         "have.value",
         ""
       );
-      // Asserion for the clear value in the x and y layout
+      // Assertion for the clear value in the x and y layout
       cy.get(
         `[data-test='dashboard-x-item-${dashboardData.customQueryValue.field1}']`
       ).should("not.exist");
@@ -514,7 +514,7 @@ describe("Create a new dashboard", () => {
         "have.value",
         ""
       );
-      // Asserion for the clear value in the x and y layout
+      // Assertion for the clear value in the x and y layout
       cy.get(
         `[data-test='dashboard-x-item-${dashboardData.customQueryValue.field1}']`
       ).should("not.exist");
@@ -597,7 +597,7 @@ describe("Create a new dashboard", () => {
         "have.value",
         ""
       );
-      // Asserion for the clear value in the x and y layout
+      // Assertion for the clear value in the x and y layout
       cy.get(
         `[data-test='dashboard-x-item-${dashboardData.customQueryValue.field1}']`
       ).should("not.exist");

@@ -223,15 +223,15 @@ it("should display correct results when fast mode on", () => {
       force: true,
     });
     logstests.addFeildandSubValue();
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
     //click on the field
     // get the data from the value variable
     cy.wait("@value", { timeout: 5000 })
       .its("response.statusCode")
       .should("eq", 200);
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
     cy.get("@value").its("response.body.hits").should("be.an", "array");
-    logstests.clickFeildSubvalue();
+    logstests.clickFieldSubvalue();
     cy.wait(2000);
     logstests.valueAddedOnPlusClick();
     cy.intercept("GET", logData.ValueQuery).as("value");

@@ -175,15 +175,15 @@ describe("Logs testcases", () => {
       force: true,
     });
     logstests.addFeildandSubValue();
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    //click on the field
    // get the data from the value variable
    cy.wait("@value", { timeout: 5000 })
      .its("response.statusCode")
      .should("eq", 200);
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    cy.get("@value").its("response.body.hits").should("be.an", "array");
-    logstests.clickFeildSubvalue();
+    logstests.clickFieldSubvalue();
    cy.wait(2000);
    logstests.valueAddedOnPlusClick();
    cy.intercept("GET", logData.ValueQuery).as("value");
@@ -292,15 +292,15 @@ describe("Logs testcases", () => {
      force: true,
    });
    logstests.addFeildandSubValue();
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    //click on the field
    // get the data from the value variable
    cy.wait("@value", { timeout: 5000 })
      .its("response.statusCode")
      .should("eq", 200);
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    cy.get("@value").its("response.body.hits").should("be.an", "array");
-    logstests.clickFeildSubvalue();
+    logstests.clickFieldSubvalue();
    cy.wait(2000);
    logstests.valueAddedOnPlusClick();
    cy.intercept("GET", logData.ValueQuery).as("value");
@@ -335,15 +335,15 @@ describe("Logs testcases", () => {
      force: true,
    });
    logstests.addFeildandSubValue();
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    //click on the field
    // get the data from the value variable
    cy.wait("@value", { timeout: 5000 })
      .its("response.statusCode")
      .should("eq", 200);
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    cy.get("@value").its("response.body.hits").should("be.an", "array");
-    logstests.clickFeildSubvalue();
+    logstests.clickFieldSubvalue();
    cy.wait(2000);
    logstests.valueAddedOnPlusClick();
    cy.intercept("GET", logData.ValueQuery).as("value");
@@ -431,15 +431,15 @@ describe("Logs testcases", () => {
      force: true,
    });
    logstests.addFeildandSubValue();
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    //click on the field
    // get the data from the value variable
    cy.wait("@value", { timeout: 5000 })
      .its("response.statusCode")
      .should("eq", 200);
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    cy.get("@value").its("response.body.hits").should("be.an", "array");
-    logstests.clickFeildSubvalue();
+    logstests.clickFieldSubvalue();
    cy.wait(2000);
    logstests.valueAddedOnPlusClick();
    cy.intercept("GET", logData.ValueQuery).as("value");
@@ -985,15 +985,15 @@ describe("Logs testcases", () => {
    cy.wait(2000);
 
    logstests.addFeildandSubValue();
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    //click on the field
    // get the data from the value variable
    cy.wait("@value", { timeout: 5000 })
      .its("response.statusCode")
      .should("eq", 200);
-    logstests.addsubFeildValue();
+    logstests.addsubFieldValue();
    cy.get("@value").its("response.body.hits").should("be.an", "array");
-    logstests.clickFeildSubvalue();
+    logstests.clickFieldSubvalue();
    cy.wait(2000);
    logstests.valueAddedOnPlusClick();
    cy.intercept("GET", logData.ValueQuery).as("value");

@@ -18,7 +18,7 @@ Cypress.Commands.add('signin', () => {
     cy.contains(details.OrganizationName).click()
 
 })
-// //open soure login
+// //open source login
 // cy.clearAllCookies()
 // cy.visit('/')
 // cy.get('input#f_efabf917-a6f5-4ee5-91c2-631f1414166e').type('nitin.dixit14@gmail.com')

@@ -117,7 +117,7 @@ test.describe("dashboard UI testcases", () => {
     await orgNavigation;
   });
 
-  test("should create a new dashboar", async ({ page }) => {
+  test("should create a new dashboard", async ({ page }) => {
     await page.locator('[data-test="menu-link-\\/dashboards-item"]').click();
     await waitForDashboardPage(page);
     await page.locator('[data-test="dashboard-add"]').click();

@@ -186,7 +186,7 @@ test.describe("Pipeline testcases", () => {
     await page.getByRole('option', { name: 'e2e_automate' }).locator('div').nth(2).click();
     await page.locator('[data-test="add-pipeline-submit-btn"]').click();
     await page.waitForTimeout(2000)
-    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-udpate-pipeline"]`).click();
+    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-update-pipeline"]`).click();
     await page.waitForTimeout(2000)
 
     // Locate the function node and pipeline chart
@@ -247,7 +247,7 @@ test.describe("Pipeline testcases", () => {
     await page.getByRole('option', { name: 'e2e_automate' }).locator('div').nth(2).click();
     await page.locator('[data-test="add-pipeline-submit-btn"]').click();
     await page.waitForTimeout(2000)
-    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-udpate-pipeline"]`).click();
+    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-update-pipeline"]`).click();
     await page.waitForTimeout(2000)
 
     // Locate the function node and pipeline chart
@@ -295,8 +295,8 @@ test.describe("Pipeline testcases", () => {
     // await page.waitForSelector(':text("e2e_automate")')
     await page.getByRole('option', { name: 'e2e_automate' }).locator('div').nth(2).click();
     await page.locator('[data-test="add-pipeline-submit-btn"]').click();
-    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-udpate-pipeline"]`)
-    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-udpate-pipeline"]`).click();
+    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-update-pipeline"]`)
+    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-update-pipeline"]`).click();
     await page.waitForTimeout(2000)
 
     // Locate the function node and pipeline chart
@@ -353,7 +353,7 @@ test.describe("Pipeline testcases", () => {
     await page.getByRole('option', { name: 'e2e_automate' }).locator('div').nth(2).click();
     await page.locator('[data-test="add-pipeline-submit-btn"]').click();
     await page.waitForTimeout(2000)
-    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-udpate-pipeline"]`).click();
+    await page.locator(`[data-test="pipeline-list-${randomPipelineName}-update-pipeline"]`).click();
     await page.waitForTimeout(2000)
 
     // Locate the function node and pipeline chart

@@ -491,7 +491,7 @@ test.describe("Sanity testcases", () => {
       .click();
     await page
       .locator(
-        '[data-test="add-alert-detination-sanitydestinations-select-item"]'
+        '[data-test="add-alert-destination-sanitydestinations-select-item"]'
       )
       .click();
     await page.locator('[data-test="add-alert-submit-btn"]').click();

@@ -418,7 +418,7 @@ test.describe(" visualize UI testcases", () => {
   test("should make the data disappear on the visualization page after a page refresh and navigate to the logs page", async ({
     page,
   }) => {
-    //Except : Data should be vanish,and tab is chage visulize to Search.
+    //Except : Data should be vanished, and tab is changed from Visualize to Search.
 
     // Perform the initial actions
     await page.locator('[data-test="date-time-btn"]').click();

@@ -336,7 +336,7 @@ test.describe(" VRL UI testcases", () => {
     await page.locator('[data-test="dashboard-panel-name"]').click();
     await page
       .locator('[data-test="dashboard-panel-name"]')
-      .fill("VRL_Dahboard");
+      .fill("VRL_Dashboard");
     await page.locator('[data-test="dashboard-panel-save"]').click();
   });

@@ -323,7 +323,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
   <q-list dense>
     <q-item
       tag="label"
-      :data-test="`add-alert-detination-${option.opt}-select-item`"
+      :data-test="`add-alert-destination-${option.opt}-select-item`"
     >
       <q-item-section avatar>
         <q-checkbox

@@ -634,7 +634,7 @@ export default defineComponent({
       return formData.value.stream_type && formData.value.stream_name;
     });
 
-    const updateCondtions = (e: any) => {
+    const updateConditions = (e: any) => {
       try {
         const ast = parser.astify(e.target.value);
         if (ast) sqlAST.value = ast;
@@ -827,7 +827,7 @@ export default defineComponent({
       }
     };
 
-    const getFromattedCondition = (
+    const getFormattedCondition = (
       column: string,
       operator: string,
       value: number | string,
@@ -875,7 +875,7 @@ export default defineComponent({
         ? condition.value
         : `'${condition.value}'`;
 
-      return getFromattedCondition(
+      return getFormattedCondition(
         condition.column,
         condition.operator,
         value,
@@ -1202,7 +1202,7 @@ export default defineComponent({
       selectedRelativePeriod,
       relativePeriods,
       editorUpdate,
-      updateCondtions,
+      updateConditions,
       updateStreamFields,
       updateEditorContent,
       triggerCols,

@@ -119,7 +119,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
       @click="toggleAlertState(props.row)"
     />
     <q-btn
-      :data-test="`alert-list-${props.row.name}-udpate-alert`"
+      :data-test="`alert-list-${props.row.name}-update-alert`"
       icon="edit"
       class="q-ml-xs"
       padding="sm"

@@ -137,7 +137,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
     <AddDestination
       :destination="editingDestination"
       :templates="templates"
-      @cancel:hideform="toggleDestionationEditor"
+      @cancel:hideform="toggleDestinationEditor"
       @get:destinations="getDestinations"
     />
   </div>
@@ -310,7 +310,7 @@ export default defineComponent({
       );
     };
     const editDestination = (destination: any) => {
-      toggleDestionationEditor();
+      toggleDestinationEditor();
       resetEditingDestination();
       if (!destination) {
         router.push({
@@ -373,7 +373,7 @@ export default defineComponent({
       confirmDelete.value.visible = false;
       confirmDelete.value.data = null;
     };
-    const toggleDestionationEditor = () => {
+    const toggleDestinationEditor = () => {
       showDestinationEditor.value = !showDestinationEditor.value;
       if (!showDestinationEditor.value)
         router.push({
@@ -422,7 +422,7 @@ export default defineComponent({
       filterData,
       editingDestination,
       templates,
-      toggleDestionationEditor,
+      toggleDestinationEditor,
       getDestinations,
       deleteDestination,
       cancelDeleteDestination,

@@ -35,7 +35,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
     <template v-slot:body-cell-actions="props">
       <q-td :props="props">
         <q-btn
-          :data-test="`alert-template-list-${props.row.name}-udpate-template`"
+          :data-test="`alert-template-list-${props.row.name}-update-template`"
           icon="edit"
           class="q-ml-xs"
           padding="sm"

@@ -33,7 +33,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
     <template #body-cell-name="props">
       <q-tr :props="props">
         <q-td :props="props" class="field_list">
-          <!-- TODO OK : Repeated code make seperate component to display field -->
+          <!-- TODO OK : Repeated code make separate component to display field -->
           <div
             v-if="props.row.ftsKey || !props.row.showValues"
             class="field-container flex content-center ellipsis q-pl-lg q-pr-sm"

@@ -562,7 +562,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 
       <q-btn
         round
-        data-test="dashboard-page-fields-list-pagination-messsage-button"
+        data-test="dashboard-page-fields-list-pagination-message-button"
         dense
         flat
         class="text text-caption text-regular"
@@ -972,7 +972,7 @@ export default defineComponent({
       });
     };
 
-    async function loadStreamFileds(streamName: string) {
+    async function loadStreamFields(streamName: string) {
       try {
         if (streamName != "") {
           return await getStream(
@@ -1008,7 +1008,7 @@ export default defineComponent({
       // check for schema exist in the object or not
       // if not pull the schema from server.
       if (!stream.hasOwnProperty("schema")) {
-        const streamData: any = await loadStreamFileds(stream.name);
+        const streamData: any = await loadStreamFields(stream.name);
         const streamSchema: any = streamData.schema;
         if (streamSchema == undefined) {
           return;
@@ -1017,7 +1017,7 @@ export default defineComponent({
         stream.schema = streamSchema;
       }
 
-      // create a schema field mapping based on field name to avoind iteration over object.
+      // create a schema field mapping based on field name to avoid iteration over object.
       // in case of user defined schema consideration, loop will be break once all defined fields are mapped.
       for (const field of stream.schema) {
         if (

@@ -761,7 +761,7 @@ export default defineComponent({
     // for already created variable, need to add selected fields
     // check if variable type is custom
     if (edit?.type === "custom") {
-      // loop on on options, and assing selected = false if selected key is not found
+      // loop on on options, and assign selected = false if selected key is not found
       edit.options.forEach((option: any) => {
         if (option.selected === undefined || option.selected === null) {
           option.selected = false;

@@ -36,7 +36,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 
     <q-separator />
 
-    <template v-if="isFetchingIntitialRoles">
+    <template v-if="isFetchingInitialRoles">
       <div data-test="edit-role-page-loading-spinner" style="margin-top: 64px">
         <q-spinner-hourglass
           color="primary"
@@ -384,7 +384,7 @@ const removedPermissions: any = ref({});
 
 const countOfVisibleResources = ref(0);
 
-const isFetchingIntitialRoles = ref(false);
+const isFetchingInitialRoles = ref(false);
 
 const filteredPermissions: Ref<{ [key: string]: Entity[] }> = ref({});
 
@@ -450,7 +450,7 @@ const updateActiveTab = (tab: string) => {
 };
 
 const getRoleDetails = () => {
-  isFetchingIntitialRoles.value = true;
+  isFetchingInitialRoles.value = true;
 
   getResources(store.state.selectedOrganization.identifier)
     .then(async (res) => {
@@ -475,12 +475,12 @@ const getRoleDetails = () => {
       await getUsers();
       savePermissionHash();
       await updateRolePermissions(permissions.value);
-      isFetchingIntitialRoles.value = false;
+      isFetchingInitialRoles.value = false;
 
       updateTableData();
     })
     .catch(() => {
-      isFetchingIntitialRoles.value = false;
+      isFetchingInitialRoles.value = false;
     });
 };

@@ -1865,7 +1865,7 @@ const saveRole = () => {
       timeout: 3000,
     });
 
-    // Reseting permissions state on save
+    // Resetting permissions state on save
 
     Object.keys(removedPermissions.value).forEach((permission) => {
       if (permissionsHash.value.has(permission))

@@ -112,7 +112,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
         </template>
         <template v-else-if="col.name === 'actions'">
           <q-btn
-            :data-test="`alert-list-${props.row.name}-udpate-alert`"
+            :data-test="`alert-list-${props.row.name}-update-alert`"
             icon="save"
             class="q-ml-xs"
             padding="sm"
@@ -146,7 +146,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
           {{ col.value }}
           <template v-if="col.name === 'actions'">
             <q-btn
-              :data-test="`alert-list-${props.row.name}-udpate-alert`"
+              :data-test="`alert-list-${props.row.name}-update-alert`"
               icon="edit"
               class="q-ml-xs"
               padding="sm"

@@ -92,7 +92,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
       @click="loginWithSSo"
     >
       <div
-        class="flex items-center justify-center full-width text-center realtive"
+        class="flex items-center justify-center full-width text-center relative"
       >
         <img
           class="absolute"
@@ -22,7 +22,7 @@
       style="width: 400px"
     >
       <div
-        class="flex items-center justify-center full-width text-center realtive"
+        class="flex items-center justify-center full-width text-center relative"
       >
         <img
           class="absolute"

@@ -154,7 +154,7 @@ export default defineComponent({
       emit("change:date-time", value);
     };
 
-    const udpateQuery = () => {
+    const updateQuery = () => {
       // alert(searchObj.data.query);
       if (queryEditorRef.value?.setValue)
         queryEditorRef.value.setValue(props.queryData.query);
@@ -212,7 +212,7 @@ export default defineComponent({
       refreshTimes: searchObj.config.refreshTimes,
       updateQueryValue,
       updateDateTime,
-      udpateQuery,
+      updateQuery,
       downloadLogs,
       resetEditorLayout,
       functionModel,

@@ -330,7 +330,7 @@ const saveFunction = () => {
 };
 
 const onFunctionCreation = async (_function: any) => {
-  // Assing newly created function to the block
+  // Assign newly created function to the block
   createNewFunction.value = false;
   emit("add:function", _function);
   await nextTick();

@@ -34,7 +34,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
     <template v-slot:body-cell-actions="props">
       <q-td :props="props">
         <q-btn
-          :data-test="`pipeline-list-${props.row.name}-udpate-pipeline`"
+          :data-test="`pipeline-list-${props.row.name}-update-pipeline`"
           icon="edit"
           class="q-ml-xs"
           padding="sm"

@@ -361,7 +361,7 @@ const filterData = (rows: any, terms: any) => {
 };
 
 const toggleReportState = (report: any) => {
-  const state = report.enabled ? "Stoping" : "Starting";
+  const state = report.enabled ? "Stopping" : "Starting";
   const dismiss = q.notify({
     message: `${state} report "${report.name}"`,
   });

@@ -254,7 +254,7 @@ export default defineComponent({
       if (value.valueType === "relative") emit("searchdata");
     };
 
-    const udpateQuery = () => {
+    const updateQuery = () => {
       // alert(searchObj.data.query);
       if (queryEditorRef.value?.setValue)
         queryEditorRef.value.setValue(searchObj.data.query);
@@ -306,7 +306,7 @@ export default defineComponent({
       refreshTimeChange,
       updateQueryValue,
       updateDateTime,
-      udpateQuery,
+      updateQuery,
       downloadLogs,
       setEditorValue,
       autoCompleteKeywords,

@@ -41,7 +41,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
     <q-btn
       v-if="
         collapsibleIcon === 'show' &&
-        seachCollapseImage == 'collapse_sidebar_icon'
+        searchCollapseImage == 'collapse_sidebar_icon'
       "
       :icon="'img:' + getImageURL('images/common/collapse_sidebar_icon.svg')"
       class="q-mr-sm"
@@ -53,7 +53,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
     <q-btn
       v-if="
         collapsibleIcon === 'show' &&
-        seachCollapseImage == 'expand_sidebar_icon'
+        searchCollapseImage == 'expand_sidebar_icon'
       "
       :icon="'img:' + getImageURL('images/common/expand_sidebar_icon.svg')"
       class="q-mr-sm"
@@ -155,7 +155,7 @@ export default defineComponent({
     const router = useRouter();
     const maxRecords = ref(props.maxRecordToReturn);
     const store = useStore();
-    const seachCollapseImage: any = ref("collapse_sidebar_icon");
+    const searchCollapseImage: any = ref("collapse_sidebar_icon");
 
     const changePagination = (val: any) => {
       emit("update:changeRecordPerPage", val);
@@ -178,7 +178,7 @@ export default defineComponent({
       router,
       maxRecords,
       toggleSidePanel,
-      seachCollapseImage,
+      searchCollapseImage,
       changePagination,
       changeMaxRecordToReturn,
       getImageURL,
@@ -193,9 +193,9 @@ export default defineComponent({
     sidebarIcon(newVal: any, oldVal: any) {
       if (newVal != oldVal && this.router.currentRoute.value.name == "logs") {
         if (this.store.state.searchCollapsibleSection == 0) {
-          this.seachCollapseImage = "expand_sidebar_icon";
+          this.searchCollapseImage = "expand_sidebar_icon";
         } else {
-          this.seachCollapseImage = "collapse_sidebar_icon";
+          this.searchCollapseImage = "collapse_sidebar_icon";
         }
       }
     },

@@ -32,7 +32,7 @@ import {
 import { getStreamFromQuery } from "@/utils/query/sqlUtils";
 import {
   formatInterval,
-  formateRateInterval,
+  formatRateInterval,
   getTimeInSecondsBasedOnUnit,
 } from "@/utils/dashboard/variables/variablesUtils";
 import {
@@ -46,7 +46,7 @@ import { isEqual, omit } from "lodash-es";
 /**
  * debounce time in milliseconds for panel data loader
  */
-const PANEL_DATA_LOADER_DEBOUCE_TIME = 50;
+const PANEL_DATA_LOADER_DEBOUNCE_TIME = 50;
 
 export const usePanelDataLoader = (
   panelSchema: any,
@@ -158,7 +158,7 @@ export const usePanelDataLoader = (
     return new Promise<void>((resolve, reject) => {
       // wait for timeout
       // and abort if abort signal received
-      const timeoutId = setTimeout(resolve, PANEL_DATA_LOADER_DEBOUCE_TIME);
+      const timeoutId = setTimeout(resolve, PANEL_DATA_LOADER_DEBOUNCE_TIME);
 
       // Listen to the abort signal
       signal.addEventListener("abort", () => {
@@ -748,7 +748,7 @@ export const usePanelDataLoader = (
         },
         {
           name: "__rate_interval",
-          value: `${formateRateInterval(__rate_interval)}`,
+          value: `${formatRateInterval(__rate_interval)}`,
         },
       ];
 
@@ -1221,7 +1221,7 @@ export const usePanelDataLoader = (
     // 2. Regular variables >= 1 and Dynamic variables = 0
 
     // log(
-    //   "Step4: 2: checking agains old values, currentDependentVariablesData",
+    //   "Step4: 2: checking against old values, currentDependentVariablesData",
     //   JSON.stringify(currentDependentVariablesData, null, 2)
    // );

@@ -1904,7 +1904,7 @@ const useDashboardPanelData = (pageKey: string = "dashboard") => {
         selector += `approx_percentile_cont(${field?.column}, 0.99)`;
         break;
       case "histogram": {
-        // if inteval is not null, then use it
+        // if interval is not null, then use it
         if (field?.args && field?.args?.length && field?.args[0].value) {
           selector += `${field?.aggregationFunction}(${field?.column}, '${field?.args[0]?.value}')`;
         } else {

@@ -56,7 +56,7 @@ import savedviewsService from "@/services/saved_views";
 import config from "@/aws-exports";
 
 const defaultObject = {
-  organizationIdetifier: "",
+  organizationIdentifier: "",
   runQuery: false,
   loading: false,
   loadingHistogram: false,
@@ -280,7 +280,7 @@ const useLogs = () => {
     parser = await sqlParser();
   };
 
-  searchObj.organizationIdetifier = store.state.selectedOrganization.identifier;
+  searchObj.organizationIdentifier = store.state.selectedOrganization.identifier;
   const resetSearchObj = () => {
     // searchObj = reactive(Object.assign({}, defaultObject));
     searchObj.data.errorMsg = "No stream found in selected organization!";
@@ -310,7 +310,7 @@ const useLogs = () => {
   };
 
   const updatedLocalLogFilterField = (): void => {
-    const identifier: string = searchObj.organizationIdetifier || "default";
+    const identifier: string = searchObj.organizationIdentifier || "default";
     const selectedFields: any =
       useLocalLogFilterField()?.value != null
         ? useLocalLogFilterField()?.value
@@ -446,7 +446,7 @@ const useLogs = () => {
     }
   }
 
-  async function loadStreamFileds(streamName: string) {
+  async function loadStreamFields(streamName: string) {
     try {
       if (streamName != "") {
         searchObj.loadingStream = true;
@@ -813,7 +813,7 @@ const useLogs = () => {
       if (parsedSQL.limit != null) {
         req.query.size = parsedSQL.limit.value[0].value;
 
-        if (parsedSQL.limit.seperator == "offset") {
+        if (parsedSQL.limit.separator == "offset") {
           req.query.from = parsedSQL.limit.value[1].value || 0;
         }
 
@@ -1137,7 +1137,7 @@ const useLogs = () => {
 
     await searchService
       .partition({
-        org_identifier: searchObj.organizationIdetifier,
+        org_identifier: searchObj.organizationIdentifier,
         query: partitionQueryReq,
         page_type: searchObj.data.stream.streamType,
         traceparent,
@@ -1952,7 +1952,7 @@ const useLogs = () => {
     searchService
       .search(
         {
-          org_identifier: searchObj.organizationIdetifier,
+          org_identifier: searchObj.organizationIdentifier,
          query: queryReq,
          page_type: searchObj.data.stream.streamType,
          traceparent,
@@ -1988,7 +1988,7 @@ const useLogs = () => {
        if (res.data.hits.length != searchObj.meta.resultGrid.rowsPerPage) {
          regeratePaginationFlag = true;
        }
-        // if total records in partition is greate than recordsPerPage then we need to update pagination
+        // if total records in partition is greater than recordsPerPage then we need to update pagination
        // setting up forceFlag to true to update pagination as we have check for pagination already created more than currentPage + 3 pages.
        refreshPartitionPagination(regeratePaginationFlag);
        searchObj.data.histogram.chartParams.title = getHistogramTitle();
@@ -2079,7 +2079,7 @@ const useLogs = () => {
        searchObj.meta.resultGrid.showPagination = false;
        //searchObj.meta.resultGrid.rowsPerPage = queryReq.query.size;
 
-        if (parsedSQL.limit.seperator == "offset") {
+        if (parsedSQL.limit.separator == "offset") {
          queryReq.query.from = parsedSQL.limit.value[1].value || 0;
        }
        delete queryReq.query.track_total_hits;
@@ -2098,7 +2098,7 @@ const useLogs = () => {
    searchService
      .search(
        {
-          org_identifier: searchObj.organizationIdetifier,
+          org_identifier: searchObj.organizationIdentifier,
          query: queryReq,
          page_type: searchObj.data.stream.streamType,
          traceparent,
@@ -2173,7 +2173,7 @@ const useLogs = () => {
        if (res.data.hits.length != searchObj.meta.resultGrid.rowsPerPage) {
          regeratePaginationFlag = true;
        }
-        // if total records in partition is greate than recordsPerPage then we need to update pagination
+        // if total records in partition is greater than recordsPerPage then we need to update pagination
        // setting up forceFlag to true to update pagination as we have check for pagination already created more than currentPage + 3 pages.
        refreshPartitionPagination(regeratePaginationFlag);
 
@@ -2375,7 +2375,7 @@ const useLogs = () => {
    searchService
      .search(
        {
-          org_identifier: searchObj.organizationIdetifier,
+          org_identifier: searchObj.organizationIdentifier,
          query: queryReq,
          page_type: searchObj.data.stream.streamType,
          traceparent,
@@ -2669,7 +2669,7 @@ const useLogs = () => {
      // if not pull the schema from server.
      if (!stream.hasOwnProperty("schema")) {
        searchObjDebug["extractFieldsWithAPI"] = " with API ";
-        const streamData: any = await loadStreamFileds(stream.name);
+        const streamData: any = await loadStreamFields(stream.name);
        const streamSchema: any = streamData.schema;
        if (streamSchema == undefined) {
          searchObj.loadingStream = false;
@@ -2737,13 +2737,13 @@ const useLogs = () => {
        streamInterestingFieldsLocal =
          localInterestingFields.value != null &&
          localInterestingFields.value[
-            searchObj.organizationIdetifier + "_" + stream.name
+            searchObj.organizationIdentifier + "_" + stream.name
          ] !== undefined &&
          localInterestingFields.value[
-            searchObj.organizationIdetifier + "_" + stream.name
+            searchObj.organizationIdentifier + "_" + stream.name
          ].length > 0
            ? localInterestingFields.value[
-                searchObj.organizationIdetifier + "_" + stream.name
+                searchObj.organizationIdentifier + "_" + stream.name
              ]
            : environmentInterestingFields.length > 0
              ? [...environmentInterestingFields]
@@ -3506,7 +3506,7 @@ const useLogs = () => {
 
    searchService
      .search_around({
-        org_identifier: searchObj.organizationIdetifier,
+        org_identifier: searchObj.organizationIdentifier,
        index: streamName,
        key: obj.key,
        size: obj.size,

@@ -17,7 +17,7 @@ import { cloneDeep } from "lodash-es";
 import { reactive, ref } from "vue";
 
 const defaultObject = {
-  organizationIdetifier: "",
+  organizationIdentifier: "",
   runQuery: false,
   loading: false,
 

@@ -90,7 +90,7 @@ const useQuery = () => {
       if (parsedSQL.limit != null) {
         parsedParams.limit = parsedSQL.limit.value[0].value;
 
-        if (parsedSQL.limit.seperator == "offset") {
+        if (parsedSQL.limit.separator == "offset") {
           parsedParams.offset = parsedSQL.limit.value[1].value || 0;
         }
 

@@ -24,7 +24,7 @@ import { useRouter } from "vue-router";
 import { copyToClipboard, useQuasar } from "quasar";
 
 const defaultObject = {
-  organizationIdetifier: "",
+  organizationIdentifier: "",
   runQuery: false,
   loading: false,
 
@@ -175,7 +175,7 @@ const useTraces = () => {
     searchObj.runQuery = false;
   };
   const updatedLocalLogFilterField = (): void => {
-    const identifier: string = searchObj.organizationIdetifier || "default";
+    const identifier: string = searchObj.organizationIdentifier || "default";
     const selectedFields: any =
       useLocalTraceFilterField()?.value != null
         ? useLocalTraceFilterField()?.value

@@ -163,7 +163,7 @@ export default defineComponent({
           // this.isActiveSubscription = false;
           // this.subScribePlan = true;
           // this.hostedResponse = res.data.data.url;
-          // setInterval(this.retriveHostedPage, 5000);
+          // setInterval(this.retrieveHostedPage, 5000);
           // this.loadSubscription(true);
         })
         .catch((e) => {
@@ -201,7 +201,7 @@ export default defineComponent({
         )
         .then((res) => {
           this.updatePaymentResponse = res.data.data.hosted_page;
-          setInterval(this.retriveHostedPage, 5000);
+          setInterval(this.retrieveHostedPage, 5000);
         })
         .catch((e) => {
           this.$q.notify({
@@ -305,7 +305,7 @@ export default defineComponent({
       // )
       // .then((res) => {
       //   this.hostedResponse = res.data.data.hosted_page;
-      //   setInterval(this.retriveHostedPage, 5000);
+      //   setInterval(this.retrieveHostedPage, 5000);
       // })
       // .catch((e) => {
       //   this.$q.notify({
@@ -364,8 +364,8 @@ export default defineComponent({
     const confirm_downgrade_subscription: any = ref(false);
     const currentPlanDetail = ref();
 
-    const retriveHostedPage = () => {
-      BillingService.retrive_hosted_page(
+    const retrieveHostedPage = () => {
+      BillingService.retrieve_hosted_page(
         store.state.selectedOrganization.identifier,
         hostedResponse.value.id
       ).then((res) => {
@@ -387,7 +387,7 @@ export default defineComponent({
       subscriptionref,
       listSubscriptionResponse,
       updatePaymentResponse,
-      retriveHostedPage,
+      retrieveHostedPage,
       Plans,
       changePayment,
       subScribePlan,

@@ -936,23 +936,23 @@ export default defineComponent({
       mainLayoutMixin.setup().getDefaultOrganization(store);
     }
 
-    const redirectToParentRoute = (machedRoutes: any) => {
+    const redirectToParentRoute = (matchedRoutes: any) => {
       if (router.currentRoute.value.path.indexOf("/dashboards/") > -1) {
         router.push({
           name: "dashboards",
         });
       } else if (
-        machedRoutes?.length > 2 &&
+        matchedRoutes?.length > 2 &&
         !excludeParentRedirect.includes(router.currentRoute.value.name) &&
         router.currentRoute.value.path.indexOf("/ingestion/") == -1 &&
         router.currentRoute.value.path.indexOf("/billings/") == -1
       ) {
-        if (machedRoutes[machedRoutes.length - 2]?.children?.length > 0) {
-          machedRoutes[machedRoutes.length - 2].children.forEach(
+        if (matchedRoutes[matchedRoutes.length - 2]?.children?.length > 0) {
+          matchedRoutes[matchedRoutes.length - 2].children.forEach(
             (route: any) => {
-              if (route.name == machedRoutes[machedRoutes.length - 1].name) {
+              if (route.name == matchedRoutes[matchedRoutes.length - 1].name) {
                 router.push({
-                  path: machedRoutes[machedRoutes.length - 2].path,
+                  path: matchedRoutes[matchedRoutes.length - 2].path,
                 });
               }
             },

@@ -340,7 +340,7 @@ export default defineComponent({
       // this.searchObj.data.resultGrid.currentPage + 1;
       this.searchObj.loading = true;
 
-      // As page count request was getting fired on chaning date records per page instead of histogram,
+      // As page count request was getting fired on changing date records per page instead of histogram,
       // so added this condition to avoid that
       this.searchObj.meta.refreshHistogram = true;
 

@@ -514,7 +514,7 @@ export default defineComponent({
       queryParams.stream !== searchObj.data.stream.selectedStream.join(",");
 
     if (queryParams.type === "trace_explorer") {
-      searchObj.organizationIdetifier = queryParams.org_identifier;
+      searchObj.organizationIdentifier = queryParams.org_identifier;
       searchObj.data.stream.selectedStream.value = queryParams.stream;
       searchObj.data.stream.streamType = queryParams.stream_type;
       resetSearchObj();
@@ -538,7 +538,7 @@ export default defineComponent({
     }
 
     if (
-      searchObj.organizationIdetifier !=
+      searchObj.organizationIdentifier !=
         store.state.selectedOrganization.identifier &&
       searchObj.loading == false
     ) {
@@ -566,7 +566,7 @@ export default defineComponent({
       await getRegionInfo();
     }
 
-    searchObj.organizationIdetifier =
+    searchObj.organizationIdentifier =
       store.state.selectedOrganization.identifier;
     restoreUrlQueryParams();
     if (searchObj.loading == false) {
@@ -737,12 +737,12 @@ export default defineComponent({
 
         searchObj.data.editorValue = searchObj.data.query;
 
-        searchBarRef.value.udpateQuery();
+        searchBarRef.value.updateQuery();
 
         searchObj.data.parsedQuery = parser.astify(searchObj.data.query);
       } else {
         searchObj.data.query = "";
-        searchBarRef.value.udpateQuery();
+        searchBarRef.value.updateQuery();
       }
     } catch (e) {
       console.log("Logs : Error in setQuery");
@@ -853,7 +853,7 @@ export default defineComponent({
       searchObj.data.query = newQuery;
       searchObj.data.editorValue = newQuery;
 
-      searchBarRef.value.udpateQuery();
+      searchBarRef.value.updateQuery();
 
       searchObj.data.parsedQuery = parser.astify(searchObj.data.query);
     }

@@ -138,7 +138,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
           : ''
       "
     >
-      <!-- TODO OK : Repeated code make seperate component to display field -->
+      <!-- TODO OK : Repeated code make separate component to display field -->
       <div
         v-if="
           props.row.ftsKey ||
@@ -597,7 +597,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 
       <q-btn
         round
-        data-test="logs-page-fields-list-pagination-messsage-button"
+        data-test="logs-page-fields-list-pagination-message-button"
         dense
         flat
         class="text text-caption text-regular"
@@ -795,7 +795,7 @@ export default defineComponent({
 
       searchObj.data.stream.selectedFields = selectedFields;
 
-      searchObj.organizationIdetifier =
+      searchObj.organizationIdentifier =
         store.state.selectedOrganization.identifier;
       updatedLocalLogFilterField();
       filterHitsColumns();
@@ -1063,11 +1063,11 @@ export default defineComponent({
       let localFieldIndex = -1;
       for (const selectedStream of field.streams) {
         localFieldIndex = localStreamFields[
-          searchObj.organizationIdetifier + "_" + selectedStream
+          searchObj.organizationIdentifier + "_" + selectedStream
         ].indexOf(field.name);
         if (localFieldIndex > -1) {
           localStreamFields[
-            searchObj.organizationIdetifier + "_" + selectedStream
+            searchObj.organizationIdentifier + "_" + selectedStream
           ].splice(localFieldIndex, 1);
         }
       }
@@ -1099,21 +1099,21 @@ export default defineComponent({
       if (selectedStream != undefined) {
         if (
           localStreamFields[
-            searchObj.organizationIdetifier + "_" + selectedStream
+            searchObj.organizationIdentifier + "_" + selectedStream
           ] == undefined
         ) {
           localStreamFields[
-            searchObj.organizationIdetifier + "_" + selectedStream
+            searchObj.organizationIdentifier + "_" + selectedStream
           ] = [];
         }
 
         if (
           localStreamFields[
-            searchObj.organizationIdetifier + "_" + selectedStream
+            searchObj.organizationIdentifier + "_" + selectedStream
           ].indexOf(field.name) == -1
         ) {
           localStreamFields[
-            searchObj.organizationIdetifier + "_" + selectedStream
+            searchObj.organizationIdentifier + "_" + selectedStream
           ].push(field.name);
         }
       }

@@ -346,7 +346,7 @@ export default {
 
     const res = await searchService.search(
       {
-        org_identifier: searchObj.organizationIdetifier,
+        org_identifier: searchObj.organizationIdentifier,
         query: {
           "query": {
             "start_time": props.value._timestamp - 10 * 60 * 1000,

@@ -1032,7 +1032,7 @@ export default defineComponent({
       searchService
         .search(
           {
-            org_identifier: this.searchObj.organizationIdetifier,
+            org_identifier: this.searchObj.organizationIdentifier,
             query: this.searchObj.data.customDownloadQueryObj,
             page_type: this.searchObj.data.stream.streamType,
           },
@@ -1271,7 +1271,7 @@ export default defineComponent({
       ) {
         searchObj.data.stream.interestingFieldList.push(col);
         localFields[
-          searchObj.organizationIdetifier +
+          searchObj.organizationIdentifier +
             "_" +
             searchObj.data.stream.selectedStream[0]
         ] = searchObj.data.stream.interestingFieldList;
@@ -1444,7 +1444,7 @@ export default defineComponent({
       }
     };
 
-    const udpateQuery = () => {
+    const updateQuery = () => {
       if (queryEditorRef.value?.setValue)
         queryEditorRef.value.setValue(searchObj.data.query);
     };
@@ -1502,7 +1502,7 @@ export default defineComponent({
     });
 
     onActivated(() => {
-      udpateQuery();
+      updateQuery();
 
       if (
         router.currentRoute.value.query.functionContent ||
@@ -2595,7 +2595,7 @@ export default defineComponent({
       showSavedViewConfirmDialog,
       cancelConfirmDialog,
       confirmDialogOK,
-      udpateQuery,
+      updateQuery,
       downloadLogs,
       saveFunction,
       resetFunctionContent,
@@ -2676,7 +2676,7 @@ export default defineComponent({
     resetFunction() {
       return this.searchObj.data.tempFunctionName;
     },
-    resetFunctionDefination() {
+    resetFunctionDefinition() {
      return this.searchObj.data.tempFunctionContent;
    },
  },
@@ -2803,7 +2803,7 @@ export default defineComponent({
      this.resetFunctionContent();
    }
  },
-    resetFunctionDefination(newVal) {
+    resetFunctionDefinition(newVal) {
    if (newVal == "") this.resetFunctionContent();
  },
 },

@@ -17,7 +17,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 <!-- eslint-disable vue/v-on-event-hyphenation -->
 <!-- eslint-disable vue/attribute-hyphenation -->
 <template>
-  <div class="col column oveflow-hidden full-height">
+  <div class="col column overflow-hidden full-height">
     <div
       class="search-list full-height"
       ref="searchListContainer"
@@ -342,7 +342,7 @@ export default defineComponent({
 
       this.searchObj.data.stream.selectedFields = selectedFields;
 
-      this.searchObj.organizationIdetifier =
+      this.searchObj.organizationIdentifier =
         this.store.state.selectedOrganization.identifier;
       this.updatedLocalLogFilterField();
     },
@@ -468,7 +468,7 @@ export default defineComponent({
       } else {
         searchObj.data.stream.selectedFields.push(fieldName);
       }
-      searchObj.organizationIdetifier =
+      searchObj.organizationIdentifier =
         store.state.selectedOrganization.identifier;
       updatedLocalLogFilterField();
       filterHitsColumns();

@@ -71,7 +71,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
           <span class="bg-highlight">stream='stderr'</span>
         </li>
         <li>
-          To search and use query function <i>extract_ip</i> on cloumn
+          To search and use query function <i>extract_ip</i> on column
           log use
           <span class="bg-highlight">extract_ip(log) | code=200</span>
         </li>
@@ -138,7 +138,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
           >
         </li>
         <li>
-          To search and use query function <i>extract_ip</i> on cloumn
+          To search and use query function <i>extract_ip</i> on column
           log use
           <span class="bg-highlight"
             >SELECT extract_ip(log) FROM <b>stream</b> WHERE

```diff
@@ -352,7 +352,7 @@ export default defineComponent({
     const chartData = ref({});
     const { showErrorNotification } = useNotifications();

-    searchObj.organizationIdetifier =
+    searchObj.organizationIdentifier =
       store.state.selectedOrganization.identifier;

     const importSqlParser = async () => {
@@ -416,7 +416,7 @@ export default defineComponent({
     if (isMounted.value) updateStreams();

     if (
-      searchObj.organizationIdetifier !=
+      searchObj.organizationIdentifier !=
       store.state.selectedOrganization.identifier
     ) {
       loadPageData();
@@ -645,7 +645,7 @@ export default defineComponent({

     function loadPageData(isFirstLoad = false) {
       resetSearchObj();
-      searchObj.organizationIdetifier =
+      searchObj.organizationIdentifier =
         store.state.selectedOrganization.identifier;

       //get stream list
```
```diff
@@ -93,7 +93,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
         <template #body-cell-name="props">
           <q-tr :props="props">
             <q-td :props="props" class="field_list">
-              <!-- TODO OK : Repeated code make seperate component to display field -->
+              <!-- TODO OK : Repeated code make separate component to display field -->
               <template
                 v-if="
                   props.row.name === store.state.zoConfig.timestamp_column
```
```diff
@@ -102,7 +102,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
             >
           </li>
           <li>
-            To search and use query function <i>extract_ip</i> on cloumn
+            To search and use query function <i>extract_ip</i> on column
             log use
             <span class="bg-highlight"
               >SELECT extract_ip(log) FROM <b>stream</b> WHERE
```
```diff
@@ -270,7 +270,7 @@ export default defineComponent({
     const indexListRef = ref(null);
     const { getStreams, getStream } = useStreams();

-    searchObj.organizationIdetifier =
+    searchObj.organizationIdentifier =
       store.state.selectedOrganization.identifier;

     const selectedStreamName = computed(
@@ -600,7 +600,7 @@ export default defineComponent({

       searchService
         .get_traces({
-          org_identifier: searchObj.organizationIdetifier,
+          org_identifier: searchObj.organizationIdentifier,
           start_time: queryReq.query.start_time,
           end_time: queryReq.query.end_time,
           filter: filter || "",
@@ -667,7 +667,7 @@ export default defineComponent({
       searchService
         .search(
           {
-            org_identifier: searchObj.organizationIdetifier,
+            org_identifier: searchObj.organizationIdentifier,
             query: req,
             page_type: "traces",
           },
@@ -782,7 +782,7 @@ export default defineComponent({

       searchService
         .get_traces({
-          org_identifier: searchObj.organizationIdetifier,
+          org_identifier: searchObj.organizationIdentifier,
           start_time: queryReq.query.start_time,
           end_time: queryReq.query.end_time,
           filter: filter || "",
@@ -1116,7 +1116,7 @@ export default defineComponent({
       searchObj.data.resultGrid.currentPage = 0;

       resetSearchObj();
-      searchObj.organizationIdetifier =
+      searchObj.organizationIdentifier =
         store.state.selectedOrganization.identifier;

       //get stream list
@@ -1127,7 +1127,7 @@ export default defineComponent({
     // searchObj.loading = true;
     // this.searchObj.data.resultGrid.currentPage = 0;
     // resetSearchObj();
-    // searchObj.organizationIdetifier =
+    // searchObj.organizationIdentifier =
     //   store.state.selectedOrganization.identifier;
     // //get stream list
     // getStreamList();
@@ -1153,7 +1153,7 @@ export default defineComponent({
         loadPageData();
       }
       if (
-        searchObj.organizationIdetifier !=
+        searchObj.organizationIdentifier !=
         store.state.selectedOrganization.identifier
       ) {
         loadPageData();
```
```diff
@@ -70,7 +70,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
             : ''
         "
       >
-        <!-- TODO OK : Repeated code make seperate component to display field -->
+        <!-- TODO OK : Repeated code make separate component to display field -->
         <div
           v-if="props.row.ftsKey || !props.row.showValues"
           class="field-container flex content-center ellipsis q-pl-lg q-pr-sm"
@@ -419,7 +419,7 @@ export default defineComponent({
       } else {
         searchObj.data.stream.selectedFields.push(row.name);
       }
-      searchObj.organizationIdetifier =
+      searchObj.organizationIdentifier =
         store.state.selectedOrganization.identifier;
       updatedLocalLogFilterField();
     }
```
```diff
@@ -363,7 +363,7 @@ export default defineComponent({
       if (value.valueType === "relative") emit("searchdata");
     };

-    const udpateQuery = () => {
+    const updateQuery = () => {
       // alert(searchObj.data.query);
       if (queryEditorRef.value?.setValue)
         queryEditorRef.value.setValue(searchObj.data.query);
@@ -446,7 +446,7 @@ export default defineComponent({
       refreshTimeChange,
       updateQueryValue,
       updateDateTime,
-      udpateQuery,
+      updateQuery,
       downloadLogs,
       setEditorValue,
       autoCompleteKeywords,
```
```diff
@@ -17,7 +17,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 <!-- eslint-disable vue/v-on-event-hyphenation -->
 <!-- eslint-disable vue/attribute-hyphenation -->
 <template>
-  <div class="col column oveflow-hidden">
+  <div class="col column overflow-hidden">
     <div class="search-list" style="width: 100%">
       <ChartRenderer
         data-test="logs-search-result-bar-chart"
@@ -96,7 +96,7 @@ export default defineComponent({
       );

       this.searchObj.data.stream.selectedFields.splice(SFIndex, 1);
-      this.searchObj.organizationIdetifier =
+      this.searchObj.organizationIdentifier =
         this.store.state.selectedOrganization.identifier;
       this.updatedLocalLogFilterField();
     },
```
```diff
@@ -879,7 +879,7 @@ const openReferenceTrace = (type: string, link: any) => {
   overflow-x: hidden;
 }

-.hearder_bg {
+.header_bg {
   border-top: 1px solid $border-color;
   background-color: color-mix(in srgb, currentColor 5%, transparent);
 }
```
```diff
@@ -68,7 +68,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
             <span class="bg-highlight">stream='stderr'</span>
           </li>
           <li>
-            To search and use query function <i>extract_ip</i> on cloumn
+            To search and use query function <i>extract_ip</i> on column
             log use
             <span class="bg-highlight">extract_ip(log) | code=200</span>
           </li>
@@ -127,7 +127,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
             >
           </li>
           <li>
-            To search and use query function <i>extract_ip</i> on cloumn
+            To search and use query function <i>extract_ip</i> on column
             log use
             <span class="bg-highlight"
               >SELECT extract_ip(log) FROM <b>stream</b> WHERE
```
```diff
@@ -768,8 +768,8 @@ export default defineComponent({
       }

       traceTree.value[0].lowestStartTime =
-        converTimeFromNsToMs(lowestStartTime);
-      traceTree.value[0].highestEndTime = converTimeFromNsToMs(highestEndTime);
+        convertTimeFromNsToMs(lowestStartTime);
+      traceTree.value[0].highestEndTime = convertTimeFromNsToMs(highestEndTime);
       traceTree.value[0].style.color =
         searchObj.meta.serviceColors[traceTree.value[0].serviceName];
@@ -891,8 +891,8 @@ export default defineComponent({
       return {
         [store.state.zoConfig.timestamp_column]:
           span[store.state.zoConfig.timestamp_column],
-        startTimeMs: converTimeFromNsToMs(span.start_time),
-        endTimeMs: converTimeFromNsToMs(span.end_time),
+        startTimeMs: convertTimeFromNsToMs(span.start_time),
+        endTimeMs: convertTimeFromNsToMs(span.end_time),
         durationMs: Number((span.duration / 1000).toFixed(4)), // This key is standard, we use for calculating width of span block. This should always be in ms
         durationUs: Number(span.duration.toFixed(4)), // This key is used for displaying duration in span block. We convert this us to ms, s in span block
         idleMs: convertTime(span.idle_ns),
@@ -916,7 +916,7 @@ export default defineComponent({
       return Number((time / 1000000).toFixed(2));
     };

-    const converTimeFromNsToMs = (time: number) => {
+    const convertTimeFromNsToMs = (time: number) => {
       const nanoseconds = time;
       const milliseconds = Math.floor(nanoseconds / 1000000);
       const date = new Date(milliseconds);
```
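The renamed helper converts span timestamps from nanoseconds to epoch milliseconds before they are laid out on the trace timeline. A minimal sketch of that conversion (the hunk only shows the start of the function, so returning the millisecond value directly is an assumption for illustration):

```ts
// Sketch of the ns -> ms conversion shown above; only the first lines of
// the real helper are visible in the hunk, so the return value is assumed.
const convertTimeFromNsToMs = (time: number): number => {
  const nanoseconds = time;
  // 1 ms = 1,000,000 ns; Math.floor drops sub-millisecond precision
  const milliseconds = Math.floor(nanoseconds / 1000000);
  return milliseconds;
};

// e.g. convertTimeFromNsToMs(2_500_000) === 2
```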
```diff
@@ -1137,7 +1137,7 @@ export default defineComponent({

 <style scoped lang="scss">
 $sidebarWidth: 60%;
-$seperatorWidth: 2px;
+$separatorWidth: 2px;
 $toolbarHeight: 50px;
 $traceHeaderHeight: 30px;
 $traceChartHeight: 210px;
@@ -1155,7 +1155,7 @@ $traceChartCollapseHeight: 42px;
   width: 100%;
 }
 .histogram-container {
-  width: calc(100% - $sidebarWidth - $seperatorWidth);
+  width: calc(100% - $sidebarWidth - $separatorWidth);
 }

 .histogram-sidebar {
```
```diff
@@ -16,7 +16,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.

 <template>
   <div
-    class="flex justify-start items-center q-px-sm hearder_bg border border-bottom border-top"
+    class="flex justify-start items-center q-px-sm header_bg border border-bottom border-top"
     :style="{ height: '30px' }"
   >
     <div :style="{ width: 'calc(100% - 22px)' }" class="q-pb-none ellipsis">
@@ -743,7 +743,7 @@ export default defineComponent({
   overflow-x: hidden;
 }

-.hearder_bg {
+.header_bg {
   border-top: 1px solid $border-color;
   background-color: color-mix(in srgb, currentColor 5%, transparent);
 }
```
```diff
@@ -40,7 +40,7 @@ const billings = {
       `/api/${org_identifier}/billings/hosted_subscription_url?plan=${plan_name}`
     );
   },
-  retrive_hosted_page: (org_identifier: string, hosted_page_id: string) => {
+  retrieve_hosted_page: (org_identifier: string, hosted_page_id: string) => {
     return http().get(
       `/api/${org_identifier}/billings/hosted_page_status/${hosted_page_id}`
     );
```
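Any caller that used the old `retrive_hosted_page` spelling needs the same rename. A hypothetical call site (the import path and the response shape are assumptions; neither is shown in this diff):

```ts
import billings from "@/services/billings"; // import path assumed

// Fetches the status of a hosted billing page; per the service above this
// issues GET /api/{org}/billings/hosted_page_status/{hosted_page_id}.
async function getHostedPageStatus(org: string, hostedPageId: string) {
  const res = await billings.retrieve_hosted_page(org, hostedPageId);
  return res.data; // response shape depends on the billing backend
}
```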
```diff
@@ -114,7 +114,7 @@ describe("Users", async () => {
   //     .setValue("omk@gmail.com");
   // });

-  // it("Should select user as Memmber", async () => {
+  // it("Should select user as Member", async () => {
   //   global.server.use(
   //     rest.post(
   //       `${store.state.API_ENDPOINT}/api/${store.state.selectedOrganization.identifier}/organizations/members`,
```
```diff
@@ -651,7 +651,7 @@ export const addTab = async (
   newTabData: any
 ) => {
   try {
-    // genereate tab id
+    // generate tab id
     newTabData.tabId = getTabId();

     const currentDashboardData = findDashboard(dashboardId, store, folderId);
@@ -736,7 +736,7 @@ export const getFoldersList = async (store: any) => {
     defaultFolder = {
       name: "default",
       folderId: "default",
-      decription: "default",
+      description: "default",
     };
   }
```
```diff
@@ -36,7 +36,7 @@ export const convertMapData = (panelSchema: any, mapData: any) => {
     return { options: null };
   }

-  const filterdMapData = panelSchema.queries.map((query: any, index: any) => {
+  const filteredMapData = panelSchema.queries.map((query: any, index: any) => {
     return mapData[index].filter((item: any) => {
       if (
         item[query.fields.latitude.alias] != null &&
@@ -50,7 +50,7 @@ export const convertMapData = (panelSchema: any, mapData: any) => {

   // validate if response is not at number
   panelSchema.queries.forEach((query: any, index: any) => {
-    const queryResult = filterdMapData[index];
+    const queryResult = filteredMapData[index];

     const queryField = queryResult?.forEach((item: any) => {
       if (isNaN(item[query.fields.latitude.alias])) {
@@ -169,7 +169,7 @@ export const convertMapData = (panelSchema: any, mapData: any) => {
         show: true,
       },
     },
-    data: filterdMapData[index]?.map((item: any) => {
+    data: filteredMapData[index]?.map((item: any) => {
       if (query.customQuery) {
         // For custom queries
         return [
```
```diff
@@ -1459,7 +1459,7 @@ export const convertSQLData = async (
     options.polar = {};
     options.xAxis = [];
     options.yAxis = [];
-    // for each gague we have seperate grid
+    // for each gague we have separate grid
     options.grid = gridDataForGauge.gridArray;

     options.series = yAxisValue.map((it: any, index: any) => {
@@ -1581,7 +1581,7 @@ export const convertSQLData = async (
       isTimeSeriesFlag = true;

       // if timezone is UTC then simply return x axis value which will be in UTC (note that need to remove Z from timezone string)
-      // else check if xaxis value is interger(ie time will be in milliseconds)
+      // else check if xaxis value is integer(ie time will be in milliseconds)
       // if yes then return to convert into other timezone
       // if no then create new datetime object and get in milliseconds using getTime method
       const timeStringCache: any = {};
```
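The corrected comment in the second hunk describes a branch that is easy to get wrong, so it is worth spelling out. A sketch of that decision, with a helper name invented here for illustration (the actual code in `convertSQLData` is not shown in this hunk):

```ts
// Illustration of the comment's logic only; the helper name and exact UTC
// handling are assumptions based on the comment, not the implementation.
function xAxisValueToMs(xValue: string | number): number {
  if (typeof xValue === "number" && Number.isInteger(xValue)) {
    // integer x-axis value: already epoch milliseconds, convert directly
    return xValue;
  }
  // otherwise build a Date from the datetime string and use getTime()
  return new Date(xValue).getTime();
}
```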
```diff
@@ -24,7 +24,7 @@ const LeafletModel = {
     return this.__echartsLayer;
   },

-  setEChartsLayerVisiblity(visible) {
+  setEChartsLayerVisibility(visible) {
     this.__echartsLayer.style.display = visible ? 'block' : 'none';
   },

@@ -121,7 +121,7 @@ const LeafletView = {
     if(!renderOnMoving && !this._moveEndHandler) {
       const moveEndHandler = function(e) {
         setTimeout(function() {
-          lmapModel.setEChartsLayerVisiblity(true);
+          lmapModel.setEChartsLayerVisibility(true);
         }, !largeMode ? 0 : 20);
       };
       this._moveEndHandler = moveEndHandler;
@@ -136,9 +136,9 @@ const LeafletView = {

     if (!renderOnMoving) {
       const moveStartHandler = function() {
-        lmapModel.setEChartsLayerVisiblity(false);
+        lmapModel.setEChartsLayerVisibility(false);
         setTimeout(function() {
-          lmapModel.setEChartsLayerVisiblity(true);
+          lmapModel.setEChartsLayerVisibility(true);
         }, 500);
       };
       this._moveStartHandler = moveStartHandler;
```
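The method being renamed implements a common map-overlay trick: hide the ECharts layer while Leaflet is panning and show it again once movement settles. A stripped-down sketch of the same pattern (the `map`/`layer` shapes here are generic stand-ins; only the timings come from the hunks above):

```ts
// Generic sketch of the hide-while-panning pattern from the hunks above.
function attachPanVisibilityHandlers(
  map: { on(event: string, handler: () => void): void },
  layer: { style: { display: string } },
) {
  const setVisibility = (visible: boolean) => {
    layer.style.display = visible ? "block" : "none";
  };
  map.on("movestart", () => {
    setVisibility(false); // hide the overlay while the map pans
    setTimeout(() => setVisibility(true), 500); // safety restore, as in the diff
  });
  map.on("moveend", () => setVisibility(true)); // restore once panning ends
}
```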
```diff
@@ -117,7 +117,7 @@ export const getTimeInSecondsBasedOnUnit = (seconds: any, unit: any) => {
   }
 };

-export const formateRateInterval = (interval: any) => {
+export const formatRateInterval = (interval: any) => {
   let formattedStr = "";
   const days = Math.floor(interval / (3600 * 24));
   if (days > 0) formattedStr += days.toString() + "d";
```
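Only the days branch of the renamed formatter is visible in the hunk. A self-contained sketch that extends the shown pattern to hours, minutes, and seconds (the lower units are assumed to follow the same shape):

```ts
// The days branch is taken from the hunk above; the remaining branches are
// assumed to continue the same pattern.
export const formatRateInterval = (interval: number): string => {
  let formattedStr = "";
  const days = Math.floor(interval / (3600 * 24));
  if (days > 0) formattedStr += days.toString() + "d";
  const hours = Math.floor((interval % (3600 * 24)) / 3600);
  if (hours > 0) formattedStr += hours.toString() + "h";
  const minutes = Math.floor((interval % 3600) / 60);
  if (minutes > 0) formattedStr += minutes.toString() + "m";
  const seconds = Math.floor(interval % 60);
  if (seconds > 0) formattedStr += seconds.toString() + "s";
  return formattedStr;
};

// formatRateInterval(90061) === "1d1h1m1s"
```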
```diff
@@ -475,7 +475,7 @@ export const vrlThemeDefinition = {
     },
     {
       foreground: "032f62",
-      token: "string.regexp string.regexp.arbitrary-repitition",
+      token: "string.regexp string.regexp.arbitrary-repetition",
     },
     {
       foreground: "22863a",
```
```diff
@@ -137,12 +137,8 @@ export default defineComponent({

     const getRowCount = computed(() => {
-      // 24 is the height of toolbar
-      // 28 is the height of table header
-      // 28.5 is the height of each row
-      // 33 is the height of pagination
-      const count = Number(
-        Math.ceil((updatedLayout.value.h * 30 - (28 + 24 + 33)) / 28.5),
-      );
+      const count = Number(Math.ceil((updatedLayout.value.h * 30 - 24) / 28.5));

       if (count < 0) return 0;
```
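This hunk is a behavioural change rather than a typo fix: the visible row count now reserves only the 24 px toolbar instead of toolbar, table header, and pagination (24 + 28 + 33 px). With 30 px per grid unit and 28.5 px per row, a panel of height h = 10 gains two rows:

```ts
// Worked example of the old vs. new row-count formula for h = 10 grid units.
const h = 10;
const oldCount = Math.ceil((h * 30 - (28 + 24 + 33)) / 28.5); // ceil(215 / 28.5) = 8
const newCount = Math.ceil((h * 30 - 24) / 28.5);             // ceil(276 / 28.5) = 10
```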
```diff
@@ -514,7 +514,7 @@ export default defineComponent({
       variablesData.values = [];
     }

-    // check if route has time realated query params
+    // check if route has time related query params
     // if not, take dashboard default time settings
     if (!((route.query.from && route.query.to) || route.query.period)) {
       // if dashboard has relative time settings
```
```diff
@@ -510,7 +510,7 @@ export default defineComponent({
       !updatedVariablesData?.values?.length && // Previous value of variables is empty
       variablesData?.values?.length > 0 // new values of variables is NOT empty
     ) {
-      // assing the variables so that it can allow the panel to wait for them to load which is manual after hitting "Apply"
+      // assign the variables so that it can allow the panel to wait for them to load which is manual after hitting "Apply"
       Object.assign(updatedVariablesData, variablesData);
     }
   };
@@ -644,7 +644,7 @@ export default defineComponent({
       variablesData.values = [];
     }

-    // check if route has time realated query params
+    // check if route has time related query params
     // if not, take dashboard default time settings
     if (!((route.query.from && route.query.to) || route.query.period)) {
       // if dashboard has relative time settings
@@ -674,7 +674,7 @@ export default defineComponent({
     }
   };

-  const isInitailDashboardPanelData = () => {
+  const isInitialDashboardPanelData = () => {
     return (
       dashboardPanelData.data.description == "" &&
       !dashboardPanelData.data.config.unit &&
@@ -691,7 +691,7 @@ export default defineComponent({

   const isOutDated = computed(() => {
     //check that is it addpanel initial call
-    if (isInitailDashboardPanelData() && !editMode.value) return false;
+    if (isInitialDashboardPanelData() && !editMode.value) return false;
     //compare chartdata and dashboardpaneldata and variables data as well
     return (
       !isEqual(chartData.value, dashboardPanelData.data) ||
```
```diff
@@ -115,7 +115,7 @@ export default defineComponent({

     onMounted(() => {
       if (searchBarRef.value != null) {
-        searchBarRef.value.udpateQuery();
+        searchBarRef.value.updateQuery();
       }
     });
```